Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h                    153
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp                1284
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h                   220
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h                   26
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp               1170
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp                    338
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h                       36
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h                    37
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp                  1122
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.h                     166
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp                 1325
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp             1644
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h                224
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp                   830
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp                424
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp              752
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp                  2084
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp                871
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp                979
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp            742
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp          1181
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp            2006
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp                   842
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp               2273
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp               5910
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h              221
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp                   827
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h             222
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp    786
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp                  1179
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp            160
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp                    487
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp               3133
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h                  364
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGValue.h                    308
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt                37
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp          835
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h           1390
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp           2006
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h              577
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp             501
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h               202
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h                 113
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp             39
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Makefile                      25
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp                  2201
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Mangle.h                     171
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp            107
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/README.txt                    47
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp              2252
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h                   98
51 files changed, 44927 insertions, 0 deletions
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
new file mode 100644
index 0000000..1ab2f55
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -0,0 +1,153 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_ABIINFO_H
+#define CLANG_CODEGEN_ABIINFO_H
+
+#include "clang/AST/Type.h"
+
+#include <cassert>
+
+namespace llvm {
+ class Type;
+ class Value;
+ class LLVMContext;
+}
+
+namespace clang {
+ class ASTContext;
+
+ // FIXME: This is a layering issue if we want to move ABIInfo
+ // down. Fortunately CGFunctionInfo has no real tie to CodeGen.
+ namespace CodeGen {
+ class CGFunctionInfo;
+ class CodeGenFunction;
+ }
+
+ /* FIXME: All of this stuff should be part of the target interface
+ somehow. It is currently here because it is not clear how to factor
+ the targets to support this, since the Targets currently live in a
+     layer below types and the like.
+ */
+
+ /// ABIArgInfo - Helper class to encapsulate information about how a
+ /// specific C type should be passed to or returned from a function.
+ class ABIArgInfo {
+ public:
+ enum Kind {
+ Direct, /// Pass the argument directly using the normal
+ /// converted LLVM type. Complex and structure types
+ /// are passed using first class aggregates.
+
+ Extend, /// Valid only for integer argument types. Same as 'direct'
+ /// but also emit a zero/sign extension attribute.
+
+ Indirect, /// Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default
+ /// alignment).
+
+ Ignore, /// Ignore the argument (treat as void). Useful for
+ /// void and empty structs.
+
+ Coerce, /// Only valid for aggregate return types, the argument
+ /// should be accessed by coercion to a provided type.
+
+ Expand, /// Only valid for aggregate argument types. The
+ /// structure should be expanded into consecutive
+ /// arguments for its constituent fields. Currently
+ /// expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable
+ /// types.
+
+ KindFirst=Direct, KindLast=Expand
+ };
+
+ private:
+ Kind TheKind;
+ const llvm::Type *TypeData;
+ unsigned UIntData;
+ bool BoolData;
+
+ ABIArgInfo(Kind K, const llvm::Type *TD=0,
+ unsigned UI=0, bool B = false)
+ : TheKind(K), TypeData(TD), UIntData(UI), BoolData(B) {}
+
+ public:
+ ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
+
+ static ABIArgInfo getDirect() {
+ return ABIArgInfo(Direct);
+ }
+ static ABIArgInfo getExtend() {
+ return ABIArgInfo(Extend);
+ }
+ static ABIArgInfo getIgnore() {
+ return ABIArgInfo(Ignore);
+ }
+ static ABIArgInfo getCoerce(const llvm::Type *T) {
+ return ABIArgInfo(Coerce, T);
+ }
+ static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal);
+ }
+ static ABIArgInfo getExpand() {
+ return ABIArgInfo(Expand);
+ }
+
+ Kind getKind() const { return TheKind; }
+ bool isDirect() const { return TheKind == Direct; }
+ bool isExtend() const { return TheKind == Extend; }
+ bool isIgnore() const { return TheKind == Ignore; }
+ bool isCoerce() const { return TheKind == Coerce; }
+ bool isIndirect() const { return TheKind == Indirect; }
+ bool isExpand() const { return TheKind == Expand; }
+
+ // Coerce accessors
+ const llvm::Type *getCoerceToType() const {
+ assert(TheKind == Coerce && "Invalid kind!");
+ return TypeData;
+ }
+
+ // Indirect accessors
+ unsigned getIndirectAlign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return UIntData;
+ }
+
+ bool getIndirectByVal() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData;
+ }
+
+ void dump() const;
+ };
+
+ /// ABIInfo - Target specific hooks for defining how a type should be
+ /// passed or returned from functions.
+ class ABIInfo {
+ public:
+ virtual ~ABIInfo();
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
+ ASTContext &Ctx,
+ llvm::LLVMContext &VMContext) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGen::CodeGenFunction &CGF) const = 0;
+ };
+} // end namespace clang
+
+#endif
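
[The factory methods above are the vocabulary a target uses to describe argument
passing. A minimal sketch of a classifier written purely in terms of this header;
the policy here is invented for illustration, the real per-target logic lives in
TargetInfo.cpp:

  // Hypothetical return-type classification for an illustrative 32-bit target.
  static clang::ABIArgInfo classifyReturnType(clang::QualType RetTy) {
    using clang::ABIArgInfo;
    if (RetTy->isVoidType())
      return ABIArgInfo::getIgnore();      // nothing is actually returned
    if (RetTy->isPromotableIntegerType())
      return ABIArgInfo::getExtend();      // widen, emitting sign/zero extension
    if (RetTy->isAnyComplexType() || RetTy->isRecordType())
      return ABIArgInfo::getIndirect(0);   // sret: hidden pointer, default align
    return ABIArgInfo::getDirect();        // scalars pass through unchanged
  }
]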
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
new file mode 100644
index 0000000..de58597
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -0,0 +1,1284 @@
+//===--- CGBlocks.cpp - Emit LLVM Code for blocks -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Target/TargetData.h"
+#include <algorithm>
+
+using namespace clang;
+using namespace CodeGen;
+
+/// CGBlockInfo - Information to generate a block literal.
+class clang::CodeGen::CGBlockInfo {
+public:
+  /// Name - The name of the block, loosely; derived from the name of
+  /// the enclosing function.
+ const char *Name;
+
+ /// DeclRefs - Variables from parent scopes that have been
+ /// imported into this block.
+ llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;
+
+ /// InnerBlocks - This block and the blocks it encloses.
+ llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;
+
+ /// CXXThisRef - Non-null if 'this' was required somewhere, in
+ /// which case this is that expression.
+ const CXXThisExpr *CXXThisRef;
+
+ /// NeedsObjCSelf - True if something in this block has an implicit
+ /// reference to 'self'.
+ bool NeedsObjCSelf;
+
+ /// These are initialized by GenerateBlockFunction.
+ bool BlockHasCopyDispose;
+ CharUnits BlockSize;
+ CharUnits BlockAlign;
+ llvm::SmallVector<const Expr*, 8> BlockLayout;
+
+ CGBlockInfo(const char *Name);
+};
+
+CGBlockInfo::CGBlockInfo(const char *N)
+ : Name(N), CXXThisRef(0), NeedsObjCSelf(false) {
+
+ // Skip asm prefix, if any.
+ if (Name && Name[0] == '\01')
+ ++Name;
+}
+
+
+llvm::Constant *CodeGenFunction::
+BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose, CharUnits Size,
+ const llvm::StructType* Ty,
+ std::vector<HelperInfo> *NoteForHelper) {
+ const llvm::Type *UnsignedLongTy
+ = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *C;
+ std::vector<llvm::Constant*> Elts;
+
+ // reserved
+ C = llvm::ConstantInt::get(UnsignedLongTy, 0);
+ Elts.push_back(C);
+
+ // Size
+ // FIXME: What is the right way to say this doesn't fit? We should give
+ // a user diagnostic in that case. Better fix would be to change the
+ // API to size_t.
+ C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
+ Elts.push_back(C);
+
+ // optional copy/dispose helpers
+ if (BlockHasCopyDispose) {
+ // copy_func_helper_decl
+ Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
+
+ // destroy_func_decl
+ Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
+ }
+
+  // Signature: a non-optional, ObjC-style method descriptor @encode sequence.
+ std::string BlockTypeEncoding;
+ CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
+
+ Elts.push_back(llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty));
+
+ // Layout.
+ C = llvm::ConstantInt::get(UnsignedLongTy, 0);
+ Elts.push_back(C);
+
+ C = llvm::ConstantStruct::get(VMContext, Elts, false);
+
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ C, "__block_descriptor_tmp");
+ return C;
+}
+
+llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock == 0)
+ NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ "_NSConcreteGlobalBlock");
+ return NSConcreteGlobalBlock;
+}
+
+llvm::Constant *BlockModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock == 0)
+ NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ "_NSConcreteStackBlock");
+ return NSConcreteStackBlock;
+}
+
+static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) {
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I != E; ++I)
+ if (*I)
+ CollectBlockDeclRefInfo(*I, Info);
+
+ // We want to ensure we walk down into block literals so we can find
+ // all nested BlockDeclRefExprs.
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ Info.InnerBlocks.insert(BE->getBlockDecl());
+ CollectBlockDeclRefInfo(BE->getBody(), Info);
+ }
+
+ else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
+ const ValueDecl *D = BDRE->getDecl();
+ // FIXME: Handle enums.
+ if (isa<FunctionDecl>(D))
+ return;
+
+ if (isa<ImplicitParamDecl>(D) &&
+ isa<ObjCMethodDecl>(D->getDeclContext()) &&
+ cast<ObjCMethodDecl>(D->getDeclContext())->getSelfDecl() == D) {
+ Info.NeedsObjCSelf = true;
+ return;
+ }
+
+ // Only Decls that escape are added.
+ if (!Info.InnerBlocks.count(D->getDeclContext()))
+ Info.DeclRefs.push_back(BDRE);
+ }
+
+ // Make sure to capture implicit 'self' references due to super calls.
+ else if (const ObjCMessageExpr *E = dyn_cast<ObjCMessageExpr>(S)) {
+ if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
+ E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+ Info.NeedsObjCSelf = true;
+ }
+
+ // Getter/setter uses may also cause implicit super references,
+ // which we can check for with:
+ else if (isa<ObjCSuperExpr>(S))
+ Info.NeedsObjCSelf = true;
+
+ else if (isa<CXXThisExpr>(S))
+ Info.CXXThisRef = cast<CXXThisExpr>(S);
+}
+
+/// CanBlockBeGlobal - Given a CGBlockInfo struct, determines if a block can be
+/// declared as a global variable instead of on the stack.
+static bool CanBlockBeGlobal(const CGBlockInfo &Info) {
+ return Info.DeclRefs.empty();
+}
+
+/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
+/// ensure we can generate the debug information for the parameter for the block
+/// invoke function.
+static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) {
+ if (Info.CXXThisRef)
+ CGF.AllocateBlockCXXThisPointer(Info.CXXThisRef);
+
+ for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
+ CGF.AllocateBlockDecl(Info.DeclRefs[i]);
+
+ if (Info.NeedsObjCSelf) {
+ ValueDecl *Self = cast<ObjCMethodDecl>(CGF.CurFuncDecl)->getSelfDecl();
+ BlockDeclRefExpr *BDRE =
+ new (CGF.getContext()) BlockDeclRefExpr(Self, Self->getType(),
+ SourceLocation(), false);
+ Info.DeclRefs.push_back(BDRE);
+ CGF.AllocateBlockDecl(BDRE);
+ }
+}
+
+// FIXME: Push most into CGM, passing down a few bits, like current function
+// name.
+llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
+ std::string Name = CurFn->getName();
+ CGBlockInfo Info(Name.c_str());
+ Info.InnerBlocks.insert(BE->getBlockDecl());
+ CollectBlockDeclRefInfo(BE->getBody(), Info);
+
+ // Check if the block can be global.
+ // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
+ // to just have one code path. We should move this function into CGM and pass
+ // CGF, then we can just check to see if CGF is 0.
+ if (0 && CanBlockBeGlobal(Info))
+ return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());
+
+ size_t BlockFields = 5;
+
+ std::vector<llvm::Constant*> Elts(BlockFields);
+
+ llvm::Constant *C;
+ llvm::Value *V;
+
+ {
+ // C = BuildBlockStructInitlist();
+ unsigned int flags = BLOCK_HAS_SIGNATURE;
+
+ // We run this first so that we set BlockHasCopyDispose from the entire
+ // block literal.
+ // __invoke
+ llvm::Function *Fn
+ = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
+ LocalDeclMap);
+ BlockHasCopyDispose |= Info.BlockHasCopyDispose;
+ Elts[3] = Fn;
+
+    // FIXME: Don't use BlockHasCopyDispose; it is set more often than
+ // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
+ if (Info.BlockHasCopyDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+
+ // __isa
+ C = CGM.getNSConcreteStackBlock();
+ C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
+ Elts[0] = C;
+
+ // __flags
+ {
+ QualType BPT = BE->getType();
+ const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
+ QualType ResultType = ftype->getResultType();
+
+ CallArgList Args;
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
+ FunctionType::ExtInfo());
+ if (CGM.ReturnTypeUsesSret(FnInfo))
+ flags |= BLOCK_USE_STRET;
+ }
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ C = llvm::ConstantInt::get(IntTy, flags);
+ Elts[1] = C;
+
+ // __reserved
+ C = llvm::ConstantInt::get(IntTy, 0);
+ Elts[2] = C;
+
+ if (Info.BlockLayout.empty()) {
+ // __descriptor
+ Elts[4] = BuildDescriptorBlockDecl(BE, Info.BlockHasCopyDispose,
+ Info.BlockSize, 0, 0);
+
+ // Optimize to being a global block.
+ Elts[0] = CGM.getNSConcreteGlobalBlock();
+
+ Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
+
+ C = llvm::ConstantStruct::get(VMContext, Elts, false);
+
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage, C,
+ "__block_holder_tmp_" +
+ llvm::Twine(CGM.getGlobalUniqueCount()));
+ QualType BPT = BE->getType();
+ C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
+ return C;
+ }
+
+ std::vector<const llvm::Type *> Types(BlockFields+Info.BlockLayout.size());
+ for (int i=0; i<4; ++i)
+ Types[i] = Elts[i]->getType();
+ Types[4] = PtrToInt8Ty;
+
+ for (unsigned i = 0, n = Info.BlockLayout.size(); i != n; ++i) {
+ const Expr *E = Info.BlockLayout[i];
+ const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+ QualType Ty = E->getType();
+ if (BDRE && BDRE->isByRef()) {
+ Types[i+BlockFields] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
+ } else
+ Types[i+BlockFields] = ConvertType(Ty);
+ }
+
+ llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);
+
+ llvm::AllocaInst *A = CreateTempAlloca(Ty);
+ A->setAlignment(Info.BlockAlign.getQuantity());
+ V = A;
+
+ // Build layout / cleanup information for all the data entries in the
+ // layout, and write the enclosing fields into the type.
+ std::vector<HelperInfo> NoteForHelper(Info.BlockLayout.size());
+ unsigned NumHelpers = 0;
+
+ for (unsigned i=0; i<4; ++i)
+ Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
+
+ for (unsigned i=0; i < Info.BlockLayout.size(); ++i) {
+ const Expr *E = Info.BlockLayout[i];
+
+ // Skip padding.
+ if (isa<DeclRefExpr>(E)) continue;
+
+ llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
+ HelperInfo &Note = NoteForHelper[NumHelpers++];
+
+ Note.index = i+5;
+
+ if (isa<CXXThisExpr>(E)) {
+ Note.RequiresCopying = false;
+ Note.flag = BLOCK_FIELD_IS_OBJECT;
+
+ Builder.CreateStore(LoadCXXThis(), Addr);
+ continue;
+ }
+
+ const BlockDeclRefExpr *BDRE = cast<BlockDeclRefExpr>(E);
+ const ValueDecl *VD = BDRE->getDecl();
+ QualType T = VD->getType();
+
+ Note.RequiresCopying = BlockRequiresCopying(T);
+
+ if (BDRE->isByRef()) {
+ Note.flag = BLOCK_FIELD_IS_BYREF;
+ if (T.isObjCGCWeak())
+ Note.flag |= BLOCK_FIELD_IS_WEAK;
+ } else if (T->isBlockPointerType()) {
+ Note.flag = BLOCK_FIELD_IS_BLOCK;
+ } else {
+ Note.flag = BLOCK_FIELD_IS_OBJECT;
+ }
+
+ if (LocalDeclMap[VD]) {
+ if (BDRE->isByRef()) {
+ llvm::Value *Loc = LocalDeclMap[VD];
+ Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc);
+ Builder.CreateStore(Loc, Addr);
+ continue;
+ } else {
+ E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
+ VD->getType(),
+ SourceLocation());
+ }
+ }
+
+ if (BDRE->isByRef()) {
+ E = new (getContext())
+ UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+
+ RValue r = EmitAnyExpr(E, Addr, false);
+ if (r.isScalar()) {
+ llvm::Value *Loc = r.getScalarVal();
+ const llvm::Type *Ty = Types[i+BlockFields];
+ if (BDRE->isByRef()) {
+          // E is now the address of the value field; instead, we want the
+          // address of the actual ByRef struct. We optimize this slightly
+ // compared to gcc by not grabbing the forwarding slot as this must
+ // be done during Block_copy for us, and we can postpone the work
+ // until then.
+ CharUnits offset = BlockDecls[BDRE->getDecl()];
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+
+ Loc = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ offset.getQuantity()),
+ "block.literal");
+ Ty = llvm::PointerType::get(Ty, 0);
+ Loc = Builder.CreateBitCast(Loc, Ty);
+ Loc = Builder.CreateLoad(Loc);
+ // Loc = Builder.CreateBitCast(Loc, Ty);
+ }
+ Builder.CreateStore(Loc, Addr);
+ } else if (r.isComplex())
+ // FIXME: implement
+ ErrorUnsupported(BE, "complex in block literal");
+ else if (r.isAggregate())
+ ; // Already created into the destination
+ else
+ assert (0 && "bad block variable");
+ // FIXME: Ensure that the offset created by the backend for
+ // the struct matches the previously computed offset in BlockDecls.
+ }
+ NoteForHelper.resize(NumHelpers);
+
+ // __descriptor
+ llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE,
+ Info.BlockHasCopyDispose,
+ Info.BlockSize, Ty,
+ &NoteForHelper);
+ Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
+ Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
+ }
+
+ QualType BPT = BE->getType();
+ V = Builder.CreateBitCast(V, ConvertType(BPT));
+  // See if this is a __weak block variable; if so, we must call
+  // objc_read_weak on it.
+ const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
+ QualType RES = ftype->getResultType();
+ if (RES.isObjCGCWeak()) {
+ // Must cast argument to id*
+ const llvm::Type *ObjectPtrTy =
+ ConvertType(CGM.getContext().getObjCIdType());
+ const llvm::Type *PtrObjectPtrTy =
+ llvm::PointerType::getUnqual(ObjectPtrTy);
+ V = Builder.CreateBitCast(V, PtrObjectPtrTy);
+ V = CGM.getObjCRuntime().EmitObjCWeakRead(*this, V);
+ }
+ return V;
+}
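
[For a literal such as ^{ use(x); } capturing one int by value, the alloca built
above corresponds roughly to the following C layout; only the five fixed fields
are dictated by the blocks ABI, and the capture fields are appended in
BlockLayout order (names illustrative):

  struct __block_literal {
    void *__isa;                      /* _NSConcreteStackBlock, or global */
    int __flags;                      /* BLOCK_HAS_SIGNATURE, copy/dispose bits */
    int __reserved;
    void (*__invoke)(void *);         /* result of GenerateBlockFunction */
    struct __block_descriptor *__descriptor;
    int x;                            /* captured variables follow */
  };
]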
+
+
+const llvm::Type *BlockModule::getBlockDescriptorType() {
+ if (BlockDescriptorType)
+ return BlockDescriptorType;
+
+ const llvm::Type *UnsignedLongTy =
+ getTypes().ConvertType(getContext().UnsignedLongTy);
+
+ // struct __block_descriptor {
+ // unsigned long reserved;
+ // unsigned long block_size;
+ //
+ // // later, the following will be added
+ //
+ // struct {
+ // void (*copyHelper)();
+  //     void (*disposeHelper)();
+ // } helpers; // !!! optional
+ //
+ // const char *signature; // the block signature
+ // const char *layout; // reserved
+ // };
+ BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
+ UnsignedLongTy,
+ UnsignedLongTy,
+ NULL);
+
+ getModule().addTypeName("struct.__block_descriptor",
+ BlockDescriptorType);
+
+ return BlockDescriptorType;
+}
+
+const llvm::Type *BlockModule::getGenericBlockLiteralType() {
+ if (GenericBlockLiteralType)
+ return GenericBlockLiteralType;
+
+ const llvm::Type *BlockDescPtrTy =
+ llvm::PointerType::getUnqual(getBlockDescriptorType());
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
+ PtrToInt8Ty,
+ IntTy,
+ IntTy,
+ PtrToInt8Ty,
+ BlockDescPtrTy,
+ NULL);
+
+ getModule().addTypeName("struct.__block_literal_generic",
+ GenericBlockLiteralType);
+
+ return GenericBlockLiteralType;
+}
+
+
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
+ ReturnValueSlot ReturnValue) {
+ const BlockPointerType *BPT =
+ E->getCallee()->getType()->getAs<BlockPointerType>();
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+
+ // Get a pointer to the generic block literal.
+ const llvm::Type *BlockLiteralTy =
+ llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+
+ // Bitcast the callee to a block literal.
+ llvm::Value *BlockLiteral =
+ Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
+
+ BlockLiteral =
+ Builder.CreateBitCast(BlockLiteral,
+ llvm::Type::getInt8PtrTy(VMContext),
+ "tmp");
+
+ // Add the block literal.
+ QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));
+
+ QualType FnType = BPT->getPointeeType();
+
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
+ E->arg_begin(), E->arg_end());
+
+ // Load the function.
+ llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");
+
+ const FunctionType *FuncTy = FnType->getAs<FunctionType>();
+ QualType ResultType = FuncTy->getResultType();
+
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().getFunctionInfo(ResultType, Args,
+ FuncTy->getExtInfo());
+
+ // Cast the function pointer to the right type.
+ const llvm::Type *BlockFTy =
+ CGM.getTypes().GetFunctionType(FnInfo, false);
+
+ const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+
+ // And call the block.
+ return EmitCall(FnInfo, Func, ReturnValue, Args);
+}
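
[In C terms, the call sequence above amounts to the following; the block literal
itself is always passed as the hidden first argument (types illustrative):

  struct __block_literal_generic *lit =
      (struct __block_literal_generic *)callee;
  void *fn = lit->__invoke;                     /* field 3 of the literal */
  result = ((result_ty (*)(void *, arg_tys))fn)(lit, args);
]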
+
+void CodeGenFunction::AllocateBlockCXXThisPointer(const CXXThisExpr *E) {
+ assert(BlockCXXThisOffset.isZero() && "already computed 'this' pointer");
+
+ // Figure out what the offset is.
+ QualType T = E->getType();
+ std::pair<CharUnits,CharUnits> TypeInfo = getContext().getTypeInfoInChars(T);
+ CharUnits Offset = getBlockOffset(TypeInfo.first, TypeInfo.second);
+
+ BlockCXXThisOffset = Offset;
+ BlockLayout.push_back(E);
+}
+
+void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
+ const ValueDecl *VD = E->getDecl();
+ CharUnits &Offset = BlockDecls[VD];
+
+ // See if we have already allocated an offset for this variable.
+ if (!Offset.isZero())
+ return;
+
+ // Don't run the expensive check, unless we have to.
+ if (!BlockHasCopyDispose)
+ if (E->isByRef()
+ || BlockRequiresCopying(E->getType()))
+ BlockHasCopyDispose = true;
+
+ const ValueDecl *D = cast<ValueDecl>(E->getDecl());
+
+ CharUnits Size;
+ CharUnits Align;
+
+ if (E->isByRef()) {
+ llvm::tie(Size,Align) =
+ getContext().getTypeInfoInChars(getContext().VoidPtrTy);
+ } else {
+ Size = getContext().getTypeSizeInChars(D->getType());
+ Align = getContext().getDeclAlign(D);
+ }
+
+ Offset = getBlockOffset(Size, Align);
+ BlockLayout.push_back(E);
+}
+
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
+ bool IsByRef) {
+ CharUnits offset = BlockDecls[VD];
+ assert(!offset.isZero() && "getting address of unallocated decl");
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+ llvm::Value *V = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ offset.getQuantity()),
+ "block.literal");
+ if (IsByRef) {
+ const llvm::Type *PtrStructTy
+ = llvm::PointerType::get(BuildByRefType(VD), 0);
+ // The block literal will need a copy/destroy helper.
+ BlockHasCopyDispose = true;
+
+ const llvm::Type *Ty = PtrStructTy;
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateBitCast(V, Ty);
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateBitCast(V, PtrStructTy);
+ V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
+ VD->getNameAsString());
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V);
+ } else {
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
+
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateBitCast(V, Ty);
+ }
+ return V;
+}
+
+llvm::Constant *
+BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
+ // Generate the block descriptor.
+ const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ llvm::Constant *DescriptorFields[4];
+
+ // Reserved
+ DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
+
+ // Block literal size. For global blocks we just use the size of the generic
+ // block literal struct.
+ CharUnits BlockLiteralSize =
+ CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
+ DescriptorFields[1] =
+ llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity());
+
+  // Signature: a non-optional, ObjC-style method descriptor @encode sequence.
+ std::string BlockTypeEncoding;
+ CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
+
+ DescriptorFields[2] = llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
+
+ // layout
+ DescriptorFields[3] =
+ llvm::ConstantInt::get(UnsignedLongTy,0);
+
+ // build the structure from the 4 elements
+ llvm::Constant *DescriptorStruct =
+ llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 4, false);
+
+ llvm::GlobalVariable *Descriptor =
+ new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
+ llvm::GlobalVariable::InternalLinkage,
+ DescriptorStruct, "__block_descriptor_global");
+
+ int FieldCount = 5;
+ // Generate the constants for the block literal.
+
+ std::vector<llvm::Constant*> LiteralFields(FieldCount);
+
+ CGBlockInfo Info(n);
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ llvm::Function *Fn
+ = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap);
+ assert(Info.BlockSize == BlockLiteralSize
+ && "no imports allowed for global block");
+
+ // isa
+ LiteralFields[0] = getNSConcreteGlobalBlock();
+
+ // Flags
+ LiteralFields[1] =
+ llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE);
+
+ // Reserved
+ LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
+
+ // Function
+ LiteralFields[3] = Fn;
+
+ // Descriptor
+ LiteralFields[4] = Descriptor;
+
+ llvm::Constant *BlockLiteralStruct =
+ llvm::ConstantStruct::get(VMContext, LiteralFields, false);
+
+ llvm::GlobalVariable *BlockLiteral =
+ new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
+ llvm::GlobalVariable::InternalLinkage,
+ BlockLiteralStruct, "__block_literal_global");
+
+ return BlockLiteral;
+}
+
+llvm::Value *CodeGenFunction::LoadBlockStruct() {
+ llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
+ "self");
+ // For now, we codegen based upon byte offsets.
+ return Builder.CreateBitCast(V, PtrToInt8Ty);
+}
+
+llvm::Function *
+CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
+ CGBlockInfo &Info,
+ const Decl *OuterFuncDecl,
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm) {
+
+ // Check if we should generate debug info for this block.
+ if (CGM.getDebugInfo())
+ DebugInfo = CGM.getDebugInfo();
+
+ // Arrange for local static and local extern declarations to appear
+ // to be local to this function as well, as they are directly referenced
+ // in a block.
+ for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
+ i != ldm.end();
+ ++i) {
+    const VarDecl *VD = dyn_cast<VarDecl>(i->first);
+    if (!VD)
+      continue;
+
+    if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
+      LocalDeclMap[VD] = i->second;
+ }
+
+ BlockOffset =
+ CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType());
+ BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
+
+ const FunctionType *BlockFunctionType = BExpr->getFunctionType();
+ QualType ResultType;
+ FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType);
+ bool IsVariadic;
+ if (const FunctionProtoType *FTy =
+ dyn_cast<FunctionProtoType>(BlockFunctionType)) {
+ ResultType = FTy->getResultType();
+ IsVariadic = FTy->isVariadic();
+ } else {
+ // K&R style block.
+ ResultType = BlockFunctionType->getResultType();
+ IsVariadic = false;
+ }
+
+ FunctionArgList Args;
+
+ CurFuncDecl = OuterFuncDecl;
+
+ const BlockDecl *BD = BExpr->getBlockDecl();
+
+ IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
+
+ // Build the block struct now.
+ AllocateAllBlockDeclRefs(*this, Info);
+
+ QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
+ BlockLayout);
+
+ // FIXME: This leaks
+ ImplicitParamDecl *SelfDecl =
+ ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD),
+ SourceLocation(), II,
+ ParmTy);
+
+ Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
+ BlockStructDecl = SelfDecl;
+
+ for (BlockDecl::param_const_iterator i = BD->param_begin(),
+ e = BD->param_end(); i != e; ++i)
+ Args.push_back(std::make_pair(*i, (*i)->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
+
+ MangleBuffer Name;
+ CGM.getMangledName(Name, BD);
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name.getString(), &CGM.getModule());
+
+ CGM.SetInternalFunctionAttributes(BD, Fn, FI);
+
+ StartFunction(BD, ResultType, Fn, Args,
+ BExpr->getBody()->getLocEnd());
+
+ CurFuncDecl = OuterFuncDecl;
+ CurCodeDecl = BD;
+
+ // If we have a C++ 'this' reference, go ahead and force it into
+ // existence now.
+ if (Info.CXXThisRef) {
+ assert(!BlockCXXThisOffset.isZero() &&
+ "haven't yet allocated 'this' reference");
+
+ // TODO: I have a dream that one day this will be typed.
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+ llvm::Value *ThisPtrRaw =
+ Builder.CreateConstInBoundsGEP1_64(BlockLiteral,
+ BlockCXXThisOffset.getQuantity(),
+ "this.ptr.raw");
+
+ const llvm::Type *Ty =
+ CGM.getTypes().ConvertType(Info.CXXThisRef->getType());
+ Ty = llvm::PointerType::get(Ty, 0);
+ llvm::Value *ThisPtr = Builder.CreateBitCast(ThisPtrRaw, Ty, "this.ptr");
+
+ CXXThisValue = Builder.CreateLoad(ThisPtr, "this");
+ }
+
+ // If we have an Objective C 'self' reference, go ahead and force it
+ // into existence now.
+ if (Info.NeedsObjCSelf) {
+ ValueDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
+ LocalDeclMap[Self] = GetAddrOfBlockDecl(Self, false);
+ }
+
+ // Save a spot to insert the debug information for all the BlockDeclRefDecls.
+ llvm::BasicBlock *entry = Builder.GetInsertBlock();
+ llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
+ --entry_ptr;
+
+ EmitStmt(BExpr->getBody());
+
+ // Remember where we were...
+ llvm::BasicBlock *resume = Builder.GetInsertBlock();
+
+ // Go back to the entry.
+ ++entry_ptr;
+ Builder.SetInsertPoint(entry, entry_ptr);
+
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ // Emit debug information for all the BlockDeclRefDecls.
+ // FIXME: also for 'this'
+ for (unsigned i = 0, e = BlockLayout.size(); i != e; ++i) {
+ if (const BlockDeclRefExpr *BDRE =
+ dyn_cast<BlockDeclRefExpr>(BlockLayout[i])) {
+ const ValueDecl *D = BDRE->getDecl();
+ DI->setLocation(D->getLocation());
+ DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
+ LocalDeclMap[getBlockStructDecl()],
+ Builder, this);
+ }
+ }
+ }
+ // And resume where we left off.
+ if (resume == 0)
+ Builder.ClearInsertionPoint();
+ else
+ Builder.SetInsertPoint(resume);
+
+ FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
+
+ // The runtime needs a minimum alignment of a void *.
+ CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
+ BlockOffset = CharUnits::fromQuantity(
+ llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
+ MinAlign.getQuantity()));
+
+ Info.BlockSize = BlockOffset;
+ Info.BlockAlign = BlockAlign;
+ Info.BlockLayout = BlockLayout;
+ Info.BlockHasCopyDispose = BlockHasCopyDispose;
+ return Fn;
+}
+
+CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
+ assert((Align.isPositive()) && "alignment must be 1 byte or more");
+
+ CharUnits OldOffset = BlockOffset;
+
+ // Ensure proper alignment, even if it means we have to have a gap
+ BlockOffset = CharUnits::fromQuantity(
+ llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
+ BlockAlign = std::max(Align, BlockAlign);
+
+ CharUnits Pad = BlockOffset - OldOffset;
+ if (Pad.isPositive()) {
+ QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
+ llvm::APInt(32,
+ Pad.getQuantity()),
+ ArrayType::Normal, 0);
+ ValueDecl *PadDecl = VarDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(),
+ 0, QualType(PadTy), 0,
+ VarDecl::None, VarDecl::None);
+ Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
+ SourceLocation());
+ BlockLayout.push_back(E);
+ }
+
+ BlockOffset += Size;
+ return BlockOffset - Size;
+}
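
[A worked example of the bookkeeping above: with BlockOffset at 20 bytes, a
request for Size 8 and Align 8 rounds the offset up to 24, records the 4-byte
gap as an anonymous char[4] VarDecl in BlockLayout (so the later struct rebuild
reproduces the padding), advances BlockOffset to 32, and returns 24 as the
allocated offset.]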
+
+llvm::Constant *BlockFunction::
+GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+
+  // FIXME: We'd like to put these into a section mergable by content, with
+  // internal linkage.
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_block_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+ llvm::Type *PtrPtrT;
+
+ if (NoteForHelperp) {
+ std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+ SrcObj = Builder.CreateLoad(SrcObj);
+
+ llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
+    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
+ DstObj = Builder.CreateLoad(DstObj);
+
+ for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+ int flag = NoteForHelper[i].flag;
+ int index = NoteForHelper[i].index;
+
+ if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+ || NoteForHelper[i].RequiresCopying) {
+ llvm::Value *Srcv = SrcObj;
+ Srcv = Builder.CreateStructGEP(Srcv, index);
+ Srcv = Builder.CreateBitCast(Srcv,
+ llvm::PointerType::get(PtrToInt8Ty, 0));
+ Srcv = Builder.CreateLoad(Srcv);
+
+ llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
+ Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
+
+ llvm::Value *N = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(T->getContext()), flag);
+ llvm::Value *F = getBlockObjectAssign();
+ Builder.CreateCall3(F, Dstv, Srcv, N);
+ }
+ }
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
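
[Ignoring the GEP/bitcast plumbing, the generated helper is equivalent to this
C sketch, with one assign call per noted field that is byref or requires
copying (names illustrative):

  void __copy_helper_block_(void *dst, void *src) {
    struct literal *d = (struct literal *)dst;
    struct literal *s = (struct literal *)src;
    /* repeated for each field recorded in NoteForHelper: */
    _Block_object_assign(&d->field, s->field, note_flag);
  }
]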
+
+llvm::Constant *BlockFunction::
+GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
+ const llvm::StructType* T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+
+  // FIXME: We'd like to put these into a section mergable by content, with
+  // internal linkage.
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__destroy_helper_block_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__destroy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ if (NoteForHelperp) {
+ std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+ llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+ llvm::Type *PtrPtrT;
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+ SrcObj = Builder.CreateLoad(SrcObj);
+
+ for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+ int flag = NoteForHelper[i].flag;
+ int index = NoteForHelper[i].index;
+
+ if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+ || NoteForHelper[i].RequiresCopying) {
+ llvm::Value *Srcv = SrcObj;
+ Srcv = Builder.CreateStructGEP(Srcv, index);
+ Srcv = Builder.CreateBitCast(Srcv,
+ llvm::PointerType::get(PtrToInt8Ty, 0));
+ Srcv = Builder.CreateLoad(Srcv);
+
+ BuildBlockRelease(Srcv, flag);
+ }
+ }
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelper) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
+ T, NoteForHelper);
+}
+
+llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
+ T, NoteForHelperp);
+}
+
+llvm::Constant *BlockFunction::
+GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+  // FIXME: We'd like to put these into a section mergable by content, with
+  // internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_id_object_copy_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ // dst->x
+ llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
+
+ // src->x
+ V = CGF.GetAddrOfLocalVar(Src);
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateBitCast(V, T);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
+ llvm::Value *SrcObj = Builder.CreateLoad(V);
+
+ flag |= BLOCK_BYREF_CALLER;
+
+ llvm::Value *N = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(T->getContext()), flag);
+ llvm::Value *F = getBlockObjectAssign();
+ Builder.CreateCall3(F, DstObj, SrcObj, N);
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
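
[The fixed field indices used in these byref helpers (1 for the forwarding
pointer, 6 for the object slot named "x") assume the structure that CodeGen's
BuildByRefType lays out for a __block variable that needs helpers; sketched in
C:

  struct __Block_byref_x {
    void *__isa;
    struct __Block_byref_x *__forwarding;  /* index 1 */
    int __flags;
    int __size;
    void (*__copy)(void *, void *);        /* present only when helpers needed */
    void (*__dispose)(void *);
    int x;                                 /* index 6: the variable itself */
  };
]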
+
+llvm::Constant *
+BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
+ int flag) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+  // FIXME: We'd like to put these into a section mergable by content, with
+  // internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_id_object_dispose_",
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
+ V = Builder.CreateLoad(V);
+
+ flag |= BLOCK_BYREF_CALLER;
+ BuildBlockRelease(V, flag);
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
+ int Flag, unsigned Align) {
+ // All alignments below that of pointer alignment collapse down to just
+ // pointer alignment, as we always have at least that much alignment to begin
+ // with.
+ Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+
+ // As an optimization, we only generate a single function of each kind we
+ // might need. We need a different one for each alignment and for each
+ // setting of flags. We mix Align and flag to get the kind.
+ uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
+ llvm::Constant *&Entry = CGM.AssignCache[Kind];
+ if (Entry)
+ return Entry;
+ return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
+ int Flag,
+ unsigned Align) {
+  // All alignments below that of pointer alignment collapse down to just
+ // pointer alignment, as we always have at least that much alignment to begin
+ // with.
+ Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+
+ // As an optimization, we only generate a single function of each kind we
+ // might need. We need a different one for each alignment and for each
+ // setting of flags. We mix Align and flag to get the kind.
+ uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
+ llvm::Constant *&Entry = CGM.DestroyCache[Kind];
+ if (Entry)
+ return Entry;
+ return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
+}
+
+llvm::Value *BlockFunction::getBlockObjectDispose() {
+ if (CGM.BlockObjectDispose == 0) {
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ CGM.BlockObjectDispose
+ = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
+ }
+ return CGM.BlockObjectDispose;
+}
+
+llvm::Value *BlockFunction::getBlockObjectAssign() {
+ if (CGM.BlockObjectAssign == 0) {
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ CGM.BlockObjectAssign
+ = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
+ }
+ return CGM.BlockObjectAssign;
+}
+
+void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
+ llvm::Value *F = getBlockObjectDispose();
+ llvm::Value *N;
+ V = Builder.CreateBitCast(V, PtrToInt8Ty);
+ N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
+ Builder.CreateCall2(F, V, N);
+}
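
[The two runtime entry points lazily bound above correspond to these C
declarations from the blocks runtime (the i8*/i32 IR types built here match
them one-to-one; signatures as in the runtime's Block_private.h):

  void _Block_object_assign(void *destAddr, const void *object, const int flags);
  void _Block_object_dispose(const void *object, const int flags);
]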
+
+ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
+
+BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
+ CGBuilderTy &B)
+ : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
+ PtrToInt8Ty = llvm::PointerType::getUnqual(
+ llvm::Type::getInt8Ty(VMContext));
+
+ BlockHasCopyDispose = false;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
new file mode 100644
index 0000000..e9b2bd5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -0,0 +1,220 @@
+//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for llvm translation for block literals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBLOCKS_H
+#define CLANG_CODEGEN_CGBLOCKS_H
+
+#include "CodeGenTypes.h"
+#include "clang/AST/Type.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+#include <vector>
+#include <map>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class PointerType;
+ class Value;
+ class LLVMContext;
+}
+
+namespace clang {
+
+namespace CodeGen {
+class CodeGenModule;
+
+class BlockBase {
+public:
+ enum {
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_USE_STRET = (1 << 29),
+ BLOCK_HAS_SIGNATURE = (1 << 30)
+ };
+};
+
+
+class BlockModule : public BlockBase {
+ ASTContext &Context;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ CodeGenTypes &Types;
+ CodeGenModule &CGM;
+ llvm::LLVMContext &VMContext;
+
+ ASTContext &getContext() const { return Context; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+public:
+ llvm::Constant *getNSConcreteGlobalBlock();
+ llvm::Constant *getNSConcreteStackBlock();
+ int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
+ const llvm::Type *getBlockDescriptorType();
+
+ const llvm::Type *getGenericBlockLiteralType();
+
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+
+ /// NSConcreteGlobalBlock - Cached reference to the class pointer for global
+ /// blocks.
+ llvm::Constant *NSConcreteGlobalBlock;
+
+  /// NSConcreteStackBlock - Cached reference to the class pointer for stack
+ /// blocks.
+ llvm::Constant *NSConcreteStackBlock;
+
+ const llvm::Type *BlockDescriptorType;
+ const llvm::Type *GenericBlockLiteralType;
+
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ llvm::Value *BlockObjectAssign;
+ llvm::Value *BlockObjectDispose;
+ const llvm::Type *PtrToInt8Ty;
+
+ std::map<uint64_t, llvm::Constant *> AssignCache;
+ std::map<uint64_t, llvm::Constant *> DestroyCache;
+
+ BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
+ CodeGenTypes &T, CodeGenModule &CodeGen)
+ : Context(C), TheModule(M), TheTargetData(TD), Types(T),
+ CGM(CodeGen), VMContext(M.getContext()),
+ NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0),
+ GenericBlockLiteralType(0),
+ BlockObjectAssign(0), BlockObjectDispose(0) {
+ Block.GlobalUniqueCount = 0;
+ PtrToInt8Ty = llvm::Type::getInt8PtrTy(M.getContext());
+ }
+
+ bool BlockRequiresCopying(QualType Ty)
+ { return getContext().BlockRequiresCopying(Ty); }
+};
+
+class BlockFunction : public BlockBase {
+ CodeGenModule &CGM;
+ CodeGenFunction &CGF;
+ ASTContext &getContext() const;
+
+protected:
+ llvm::LLVMContext &VMContext;
+
+public:
+ const llvm::PointerType *PtrToInt8Ty;
+ struct HelperInfo {
+ int index;
+ int flag;
+ bool RequiresCopying;
+ };
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+ };
+
+ CGBuilderTy &Builder;
+
+ BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B);
+
+ /// BlockOffset - The offset in bytes for the next allocation of an
+ /// imported block variable.
+ CharUnits BlockOffset;
+ /// BlockAlign - Maximal alignment needed for the Block expressed in
+ /// characters.
+ CharUnits BlockAlign;
+
+ /// getBlockOffset - Allocate a location within the block's storage
+ /// for a value with the given size and alignment requirements.
+ CharUnits getBlockOffset(CharUnits Size, CharUnits Align);
+
+ /// BlockHasCopyDispose - True iff the block uses copy/dispose.
+ bool BlockHasCopyDispose;
+
+ /// BlockLayout - The layout of the block's storage, represented as
+ /// a sequence of expressions which require such storage. The
+ /// expressions can be:
+ /// - a BlockDeclRefExpr, indicating that the given declaration
+ /// from an enclosing scope is needed by the block;
+ /// - a DeclRefExpr, which always wraps an anonymous VarDecl with
+ /// array type, used to insert padding into the block; or
+ /// - a CXXThisExpr, indicating that the C++ 'this' value should
+ /// propagate from the parent to the block.
+ /// This is a really silly representation.
+ llvm::SmallVector<const Expr *, 8> BlockLayout;
+
+ /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
+ llvm::DenseMap<const Decl*, CharUnits> BlockDecls;
+
+ /// BlockCXXThisOffset - The offset of the C++ 'this' value within
+ /// the block structure.
+ CharUnits BlockCXXThisOffset;
+
+ ImplicitParamDecl *BlockStructDecl;
+ ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
+
+ llvm::Constant *GenerateCopyHelperFunction(bool, const llvm::StructType *,
+ std::vector<HelperInfo> *);
+ llvm::Constant *GenerateDestroyHelperFunction(bool, const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Constant *BuildCopyHelper(const llvm::StructType *,
+ std::vector<HelperInfo> *);
+ llvm::Constant *BuildDestroyHelper(const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
+ llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);
+
+ llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag,
+ unsigned Align);
+ llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag,
+ unsigned Align);
+
+ llvm::Value *getBlockObjectAssign();
+ llvm::Value *getBlockObjectDispose();
+ void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);
+
+ bool BlockRequiresCopying(QualType Ty)
+ { return getContext().BlockRequiresCopying(Ty); }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
new file mode 100644
index 0000000..ed56bd9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
@@ -0,0 +1,26 @@
+//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBUILDER_H
+#define CLANG_CODEGEN_CGBUILDER_H
+
+#include "llvm/Support/IRBuilder.h"
+
+namespace clang {
+namespace CodeGen {
+ // Don't preserve names on values in an optimized build.
+#ifdef NDEBUG
+ typedef llvm::IRBuilder<false> CGBuilderTy;
+#else
+ typedef llvm::IRBuilder<> CGBuilderTy;
+#endif
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
new file mode 100644
index 0000000..dd505c2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -0,0 +1,1170 @@
+//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+static void EmitMemoryBarrier(CodeGenFunction &CGF,
+ bool LoadLoad, bool LoadStore,
+ bool StoreLoad, bool StoreStore,
+ bool Device) {
+ Value *True = llvm::ConstantInt::getTrue(CGF.getLLVMContext());
+ Value *False = llvm::ConstantInt::getFalse(CGF.getLLVMContext());
+ Value *C[5] = { LoadLoad ? True : False,
+ LoadStore ? True : False,
+ StoreLoad ? True : False,
+ StoreStore ? True : False,
+ Device ? True : False };
+ CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
+ C, C + 5);
+}
+
+// The atomic builtins are also full memory barriers. This is a utility for
+// wrapping a call to the builtins with memory barriers.
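+// For illustration (a hedged sketch, not from this file): a call such as
+// __sync_fetch_and_add(p, 1) is emitted roughly as
+//   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
+//   %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1)
+//   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)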
+static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
+ Value **ArgBegin, Value **ArgEnd) {
+ // FIXME: We need a target hook for whether this applies to device memory or
+ // not.
+ bool Device = true;
+
+ // Create barriers both before and after the call.
+ EmitMemoryBarrier(CGF, true, true, true, true, Device);
+ Value *Result = CGF.Builder.CreateCall(Fn, ArgBegin, ArgEnd);
+ EmitMemoryBarrier(CGF, true, true, true, true, Device);
+ return Result;
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID
+/// and the expression node.
+static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
+ Intrinsic::ID Id, const CallExpr *E) {
+ Value *Args[2] = { CGF.EmitScalarExpr(E->getArg(0)),
+ CGF.EmitScalarExpr(E->getArg(1)) };
+ const llvm::Type *ResType[2];
+ ResType[0] = CGF.ConvertType(E->getType());
+ ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
+ return RValue::get(EmitCallWithBarrier(CGF, AtomF, Args, Args + 2));
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID and
+/// the expression node, where the return value is the result of the
+/// operation.
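+/// For example (illustrative): __sync_add_and_fetch(p, v) emits
+/// atomic.load.add, which yields the old value, and then re-applies the
+/// addition so the caller observes the post-operation value.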
+static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
+ Intrinsic::ID Id, const CallExpr *E,
+ Instruction::BinaryOps Op) {
+ const llvm::Type *ResType[2];
+ ResType[0] = CGF.ConvertType(E->getType());
+ ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
+ Value *Args[2] = { CGF.EmitScalarExpr(E->getArg(0)),
+ CGF.EmitScalarExpr(E->getArg(1)) };
+ Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Args[1]));
+}
+
+static llvm::ConstantInt *getInt32(llvm::LLVMContext &Context, int32_t Value) {
+ return llvm::ConstantInt::get(llvm::Type::getInt32Ty(Context), Value);
+}
+
+
+/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
+/// which must be a scalar floating point type.
+static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
+ const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
+ assert(ValTyP && "isn't scalar fp type!");
+
+ StringRef FnName;
+ switch (ValTyP->getKind()) {
+ default: assert(0 && "Isn't a scalar fp type!");
+ case BuiltinType::Float: FnName = "fabsf"; break;
+ case BuiltinType::Double: FnName = "fabs"; break;
+ case BuiltinType::LongDouble: FnName = "fabsl"; break;
+ }
+
+ // The prototype is something that takes and returns whatever V's type is.
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(V->getType());
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), Args, false);
+ llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
+
+ return CGF.Builder.CreateCall(Fn, V, "abs");
+}
+
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E) {
+ // See if we can constant fold this builtin. If so, don't emit it at all.
+ Expr::EvalResult Result;
+ if (E->Evaluate(Result, CGM.getContext())) {
+ if (Result.Val.isInt())
+ return RValue::get(llvm::ConstantInt::get(VMContext,
+ Result.Val.getInt()));
+ else if (Result.Val.isFloat())
+ return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
+ }
+
+ switch (BuiltinID) {
+ default: break; // Handle intrinsics and libm functions below.
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ case Builtin::BI__builtin___NSStringMakeConstantString:
+ return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__builtin_va_end: {
+ Value *ArgValue = EmitVAListRef(E->getArg(0));
+ const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+ if (ArgValue->getType() != DestType)
+ ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+ ArgValue->getName().data());
+
+ Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+ Intrinsic::vaend : Intrinsic::vastart;
+ return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
+ }
+ case Builtin::BI__builtin_va_copy: {
+ Value *DstPtr = EmitVAListRef(E->getArg(0));
+ Value *SrcPtr = EmitVAListRef(E->getArg(1));
+
+ const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+
+ DstPtr = Builder.CreateBitCast(DstPtr, Type);
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
+ return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+ DstPtr, SrcPtr));
+ }
+ case Builtin::BI__builtin_abs: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue,
+ llvm::Constant::getNullValue(ArgValue->getType()),
+ "abscond");
+ Value *Result =
+ Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
+
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll: {
+ // ffs(x) -> x ? cttz(x) + 1 : 0
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
+ llvm::ConstantInt::get(ArgType, 1), "tmp");
+ Value *Zero = llvm::Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll: {
+ // parity(x) -> ctpop(x) & 1
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
+ "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_expect:
+ // FIXME: pass expect through to LLVM
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
+ return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
+ }
+ case Builtin::BI__builtin_object_size: {
+    // We pass this builtin on to the optimizer so that it can figure out
+    // the object size in more complex cases.
+ const llvm::Type *ResType[] = {
+ ConvertType(E->getType())
+ };
+
+    // LLVM only supports 0 and 2; make sure we pass that along as a boolean.
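+    // Illustrative example: __builtin_object_size(p, 2) lowers to
+    //   call i32 @llvm.objectsize.i32(i8* %p, i1 true)
+    // since types 2 and 3 request the minimum and 0 and 1 the maximum.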
+ Value *Ty = EmitScalarExpr(E->getArg(1));
+ ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
+ assert(CI);
+ uint64_t val = CI->getZExtValue();
+ CI = ConstantInt::get(llvm::Type::getInt1Ty(VMContext), (val & 0x2) >> 1);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
+ return RValue::get(Builder.CreateCall2(F,
+ EmitScalarExpr(E->getArg(0)),
+ CI));
+ }
+ case Builtin::BI__builtin_prefetch: {
+ Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
+    // FIXME: Technically these constants should be of type 'int', yes?
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
+ return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
+ }
+ case Builtin::BI__builtin_trap: {
+ Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_unreachable: {
+ if (CatchUndefined && HaveInsertPoint())
+ EmitBranch(getTrapBB());
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
+ }
+
+ case Builtin::BI__builtin_powi:
+ case Builtin::BI__builtin_powif:
+ case Builtin::BI__builtin_powil: {
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ }
+
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered: {
+ // Ordered comparisons: we know the arguments to these are matching scalar
+ // floating point values.
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unknown ordered comparison");
+ case Builtin::BI__builtin_isgreater:
+ LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isgreaterequal:
+ LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isless:
+ LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessequal:
+ LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessgreater:
+ LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isunordered:
+ LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
+ break;
+ }
+ // ZExt bool to int type.
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
+ "tmp"));
+ }
+ case Builtin::BI__builtin_isnan: {
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = Builder.CreateFCmpUNO(V, V, "cmp");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ }
+
+ case Builtin::BI__builtin_isinf: {
+ // isinf(x) --> fabs(x) == infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = EmitFAbs(*this, V, E->getArg(0)->getType());
+
+ V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ }
+
+ // TODO: BI__builtin_isinf_sign
+ // isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0
+
+ case Builtin::BI__builtin_isnormal: {
+ // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsLessThanInf =
+ Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
+ V = Builder.CreateAnd(V, IsNormal, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isfinite: {
+    // isfinite(x) --> x == x && fabs(x) != infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsNotInf =
+ Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+
+ V = Builder.CreateAnd(Eq, IsNotInf, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BIalloca:
+ case Builtin::BI__builtin_alloca: {
+    // FIXME: LLVM IR should allow alloca with an i64 size!
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp");
+ return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
+ }
+ case Builtin::BIbzero:
+ case Builtin::BI__builtin_bzero: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SizeVal = EmitScalarExpr(E->getArg(1));
+ Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
+ Address,
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
+ SizeVal,
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ return RValue::get(Address);
+ }
+ case Builtin::BImemcpy:
+ case Builtin::BI__builtin_memcpy: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(),
+ SizeVal->getType()),
+ Address, SrcAddr, SizeVal,
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ return RValue::get(Address);
+ }
+ case Builtin::BImemmove:
+ case Builtin::BI__builtin_memmove: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(),
+ SizeVal->getType()),
+ Address, SrcAddr, SizeVal,
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ return RValue::get(Address);
+ }
+ case Builtin::BImemset:
+ case Builtin::BI__builtin_memset: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ Builder.CreateCall5(CGM.getMemSetFn(Address->getType(), SizeVal->getType()),
+ Address,
+ Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ llvm::Type::getInt8Ty(VMContext)),
+ SizeVal,
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_dwarf_cfa: {
+ // The offset in bytes from the first argument to the CFA.
+ //
+ // Why on earth is this in the frontend? Is there any reason at
+ // all that the backend can't reasonably determine this while
+ // lowering llvm.eh.dwarf.cfa()?
+ //
+ // TODO: If there's a satisfactory reason, add a target hook for
+ // this instead of hard-coding 0, which is correct for most targets.
+ int32_t Offset = 0;
+
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
+ return RValue::get(Builder.CreateCall(F, getInt32(VMContext, Offset)));
+ }
+ case Builtin::BI__builtin_return_address: {
+ Value *Depth = EmitScalarExpr(E->getArg(0));
+ Depth = Builder.CreateIntCast(Depth,
+ llvm::Type::getInt32Ty(VMContext),
+ false, "tmp");
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_frame_address: {
+ Value *Depth = EmitScalarExpr(E->getArg(0));
+ Depth = Builder.CreateIntCast(Depth,
+ llvm::Type::getInt32Ty(VMContext),
+ false, "tmp");
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_extract_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_frob_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_dwarf_sp_column: {
+ const llvm::IntegerType *Ty
+ = cast<llvm::IntegerType>(ConvertType(E->getType()));
+ int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
+ if (Column == -1) {
+ CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
+ return RValue::get(llvm::UndefValue::get(Ty));
+ }
+ return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
+ }
+ case Builtin::BI__builtin_init_dwarf_reg_size_table: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
+ CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_eh_return: {
+ Value *Int = EmitScalarExpr(E->getArg(0));
+ Value *Ptr = EmitScalarExpr(E->getArg(1));
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
+ "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
+ Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
+ ? Intrinsic::eh_return_i32
+ : Intrinsic::eh_return_i64,
+ 0, 0);
+ Builder.CreateCall2(F, Int, Ptr);
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
+ }
+ case Builtin::BI__builtin_unwind_init: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_extend_pointer: {
+ // Extends a pointer to the size of an _Unwind_Word, which is
+ // uint64_t on all platforms. Generally this gets poked into a
+ // register and eventually used as an address, so if the
+ // addressing registers are wider than pointers and the platform
+ // doesn't implicitly ignore high-order bits when doing
+ // addressing, we need to make sure we zext / sext based on
+ // the platform's expectations.
+ //
+ // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
+
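+    // Sketch of the emitted IR on a 32-bit target (illustrative names):
+    //   %cast = ptrtoint i8* %ptr to i32
+    //   %ext  = zext i32 %cast to i64   ; or sext, per extendPointerWithSExt()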
+ LLVMContext &C = CGM.getLLVMContext();
+
+ // Cast the pointer to intptr_t.
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ const llvm::IntegerType *IntPtrTy = CGM.getTargetData().getIntPtrType(C);
+ Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
+
+ // If that's 64 bits, we're done.
+ if (IntPtrTy->getBitWidth() == 64)
+ return RValue::get(Result);
+
+ // Otherwise, ask the codegen data what to do.
+ const llvm::IntegerType *Int64Ty = llvm::IntegerType::get(C, 64);
+ if (getTargetHooks().extendPointerWithSExt())
+ return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
+ else
+ return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
+ }
+#if 0
+ // FIXME: Finish/enable when LLVM backend support stabilizes
+ case Builtin::BI__builtin_setjmp: {
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ // Store the frame pointer to the buffer
+ Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ Value *FrameAddr =
+ Builder.CreateCall(FrameAddrF,
+ Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
+ Builder.CreateStore(FrameAddr, Buf);
+ // Call the setjmp intrinsic
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
+ const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+ Buf = Builder.CreateBitCast(Buf, DestType);
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+ case Builtin::BI__builtin_longjmp: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+ Buf = Builder.CreateBitCast(Buf, DestType);
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+#endif
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ assert(0 && "Shouldn't make it through sema");
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+ case Builtin::BI__sync_fetch_and_max:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+ case Builtin::BI__sync_fetch_and_umin:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+ case Builtin::BI__sync_fetch_and_umax:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+ llvm::Instruction::Add);
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
+ llvm::Instruction::Sub);
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
+ llvm::Instruction::And);
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
+ llvm::Instruction::Or);
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
+ llvm::Instruction::Xor);
+
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16: {
+ const llvm::Type *ResType[2];
+ ResType[0]= ConvertType(E->getType());
+ ResType[1] = ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
+ Value *Args[3] = { EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)) };
+ return RValue::get(EmitCallWithBarrier(*this, AtomF, Args, Args + 3));
+ }
+
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16: {
+ const llvm::Type *ResType[2];
+ ResType[0]= ConvertType(E->getArg(1)->getType());
+ ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
+ Value *OldVal = EmitScalarExpr(E->getArg(1));
+ Value *Args[3] = { EmitScalarExpr(E->getArg(0)),
+ OldVal,
+ EmitScalarExpr(E->getArg(2)) };
+ Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
+ // zext bool to int.
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ElTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ llvm::StoreInst *Store =
+ Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
+ Store->setVolatile(true);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__sync_synchronize: {
+    // We assume, as gcc appears to, that this only applies to cached memory.
+ EmitMemoryBarrier(*this, true, true, true, true, false);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__builtin_llvm_memory_barrier: {
+ Value *C[5] = {
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)),
+ EmitScalarExpr(E->getArg(4))
+ };
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+ return RValue::get(0);
+ }
+
+ // Library functions with special handling.
+ case Builtin::BIsqrt:
+ case Builtin::BIsqrtf:
+ case Builtin::BIsqrtl: {
+ // TODO: there is currently no set of optimizer flags
+ // sufficient for us to rewrite sqrt to @llvm.sqrt.
+ // -fmath-errno=0 is not good enough; we need finiteness.
+ // We could probably precondition the call with an ult
+ // against 0, but is that worth the complexity?
+ break;
+ }
+
+ case Builtin::BIpow:
+ case Builtin::BIpowf:
+ case Builtin::BIpowl: {
+    // Rewrite pow to the llvm.pow intrinsic if allowed.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ }
+
+ case Builtin::BI__builtin_signbit:
+ case Builtin::BI__builtin_signbitf:
+ case Builtin::BI__builtin_signbitl: {
+ LLVMContext &C = CGM.getLLVMContext();
+
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ArgTy = Arg->getType();
+ if (ArgTy->isPPC_FP128Ty())
+ break; // FIXME: I'm not sure what the right implementation is here.
+ int ArgWidth = ArgTy->getPrimitiveSizeInBits();
+ const llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
+ Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
+ Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ }
+ }
+
+ // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
+ // that function.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
+ getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return EmitCall(E->getCallee()->getType(),
+ CGM.getBuiltinLibFunction(FD, BuiltinID),
+ ReturnValueSlot(),
+ E->arg_begin(), E->arg_end());
+
+ // See if we have a target specific intrinsic.
+ const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
+ Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
+ if (const char *Prefix =
+ llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
+ IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+
+ if (IntrinsicID != Intrinsic::not_intrinsic) {
+ SmallVector<Value*, 16> Args;
+
+ Function *F = CGM.getIntrinsic(IntrinsicID);
+ const llvm::FunctionType *FTy = F->getFunctionType();
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ Value *ArgValue = EmitScalarExpr(E->getArg(i));
+
+      // If the intrinsic argument type differs from the builtin argument
+      // type, we need to do a bitcast.
+ const llvm::Type *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType()) {
+ assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
+ "Must be able to losslessly bit cast to param");
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ }
+
+ Args.push_back(ArgValue);
+ }
+
+ Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
+ QualType BuiltinRetType = E->getType();
+
+ const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
+ if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
+
+ if (RetTy != V->getType()) {
+ assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
+ "Must be able to losslessly bit cast result type");
+ V = Builder.CreateBitCast(V, RetTy);
+ }
+
+ return RValue::get(V);
+ }
+
+ // See if we have a target specific builtin that needs to be lowered.
+ if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
+ return RValue::get(V);
+
+ ErrorUnsupported(E, "builtin function");
+
+  // Unknown builtin: for now, just dump it out and return undef.
+ if (hasAggregateLLVMType(E->getType()))
+ return RValue::getAggregate(CreateMemTemp(E->getType()));
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+}
+
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (Target.getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ return EmitARMBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return EmitX86BuiltinExpr(BuiltinID, E);
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ return EmitPPCBuiltinExpr(BuiltinID, E);
+ default:
+ return 0;
+ }
+}
+
+Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ default: return 0;
+
+ case ARM::BI__builtin_thread_pointer: {
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::arm_thread_pointer, 0, 0);
+ return Builder.CreateCall(AtomF);
+ }
+ }
+}
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ switch (BuiltinID) {
+ default: return 0;
+ case X86::BI__builtin_ia32_pslldi128:
+ case X86::BI__builtin_ia32_psllqi128:
+ case X86::BI__builtin_ia32_psllwi128:
+ case X86::BI__builtin_ia32_psradi128:
+ case X86::BI__builtin_ia32_psrawi128:
+ case X86::BI__builtin_ia32_psrldi128:
+ case X86::BI__builtin_ia32_psrlqi128:
+ case X86::BI__builtin_ia32_psrlwi128: {
+ Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
+ Ops[1], Zero, "insert");
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported shift intrinsic!");
+ case X86::BI__builtin_ia32_pslldi128:
+ name = "pslldi";
+ ID = Intrinsic::x86_sse2_psll_d;
+ break;
+ case X86::BI__builtin_ia32_psllqi128:
+ name = "psllqi";
+ ID = Intrinsic::x86_sse2_psll_q;
+ break;
+ case X86::BI__builtin_ia32_psllwi128:
+ name = "psllwi";
+ ID = Intrinsic::x86_sse2_psll_w;
+ break;
+ case X86::BI__builtin_ia32_psradi128:
+ name = "psradi";
+ ID = Intrinsic::x86_sse2_psra_d;
+ break;
+ case X86::BI__builtin_ia32_psrawi128:
+ name = "psrawi";
+ ID = Intrinsic::x86_sse2_psra_w;
+ break;
+ case X86::BI__builtin_ia32_psrldi128:
+ name = "psrldi";
+ ID = Intrinsic::x86_sse2_psrl_d;
+ break;
+ case X86::BI__builtin_ia32_psrlqi128:
+ name = "psrlqi";
+ ID = Intrinsic::x86_sse2_psrl_q;
+ break;
+ case X86::BI__builtin_ia32_psrlwi128:
+ name = "psrlwi";
+ ID = Intrinsic::x86_sse2_psrl_w;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
+ case X86::BI__builtin_ia32_pslldi:
+ case X86::BI__builtin_ia32_psllqi:
+ case X86::BI__builtin_ia32_psllwi:
+ case X86::BI__builtin_ia32_psradi:
+ case X86::BI__builtin_ia32_psrawi:
+ case X86::BI__builtin_ia32_psrldi:
+ case X86::BI__builtin_ia32_psrlqi:
+ case X86::BI__builtin_ia32_psrlwi: {
+ Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported shift intrinsic!");
+ case X86::BI__builtin_ia32_pslldi:
+ name = "pslldi";
+ ID = Intrinsic::x86_mmx_psll_d;
+ break;
+ case X86::BI__builtin_ia32_psllqi:
+ name = "psllqi";
+ ID = Intrinsic::x86_mmx_psll_q;
+ break;
+ case X86::BI__builtin_ia32_psllwi:
+ name = "psllwi";
+ ID = Intrinsic::x86_mmx_psll_w;
+ break;
+ case X86::BI__builtin_ia32_psradi:
+ name = "psradi";
+ ID = Intrinsic::x86_mmx_psra_d;
+ break;
+ case X86::BI__builtin_ia32_psrawi:
+ name = "psrawi";
+ ID = Intrinsic::x86_mmx_psra_w;
+ break;
+ case X86::BI__builtin_ia32_psrldi:
+ name = "psrldi";
+ ID = Intrinsic::x86_mmx_psrl_d;
+ break;
+ case X86::BI__builtin_ia32_psrlqi:
+ name = "psrlqi";
+ ID = Intrinsic::x86_mmx_psrl_q;
+ break;
+ case X86::BI__builtin_ia32_psrlwi:
+ name = "psrlwi";
+ ID = Intrinsic::x86_mmx_psrl_w;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
+ case X86::BI__builtin_ia32_cmpps: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
+ }
+ case X86::BI__builtin_ia32_cmpss: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
+ }
+ case X86::BI__builtin_ia32_ldmxcsr: {
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
+ Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+ Builder.CreateStore(Ops[0], Tmp);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ }
+ case X86::BI__builtin_ia32_stmxcsr: {
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
+ Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+ One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ return Builder.CreateLoad(Tmp, "stmxcsr");
+ }
+ case X86::BI__builtin_ia32_cmppd: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
+ }
+ case X86::BI__builtin_ia32_cmpsd: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
+ }
+ case X86::BI__builtin_ia32_storehps:
+ case X86::BI__builtin_ia32_storelps: {
+ const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
+ llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+
+ // cast val v2i64
+ Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+
+ // extract (0, 1)
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+
+ // cast pointer to i64 & store
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case X86::BI__builtin_ia32_palignr: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 9 bytes,
+ // emit a shuffle instruction.
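+    // For instance (illustrative): shiftVal == 2 selects elements 2..9 of
+    // the 16-element concatenation <Ops[1], Ops[0]>, where Ops[1] supplies
+    // elements 0..7, matching the palignr semantics.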
+ if (shiftVal <= 8) {
+ const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+ llvm::SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 8 but less
+ // than 16 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+ const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // Emit the shift via the MMX psrl.q intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors 16 or more bytes, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ case X86::BI__builtin_ia32_palignr128: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+ llvm::SmallVector<llvm::Constant*, 16> Indices;
+ for (unsigned i = 0; i != 16; ++i)
+ Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 32) {
+ const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+ const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+ const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal-16) * 8);
+
+      // Emit the shift via the SSE2 psrl.dq intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors 32 or more bytes, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ }
+}
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return 0;
+
+ // vec_st
+ case PPC::BI__builtin_altivec_stvx:
+ case PPC::BI__builtin_altivec_stvxl:
+ case PPC::BI__builtin_altivec_stvebx:
+ case PPC::BI__builtin_altivec_stvehx:
+ case PPC::BI__builtin_altivec_stvewx:
+ {
+ Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext));
+ Ops[1] = !isa<Constant>(Ops[1]) || !cast<Constant>(Ops[1])->isNullValue()
+ ? Builder.CreateGEP(Ops[2], Ops[1], "tmp") : Ops[2];
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+    default: assert(0 && "Unsupported store intrinsic!");
+ case PPC::BI__builtin_altivec_stvx:
+ ID = Intrinsic::ppc_altivec_stvx;
+ break;
+ case PPC::BI__builtin_altivec_stvxl:
+ ID = Intrinsic::ppc_altivec_stvxl;
+ break;
+ case PPC::BI__builtin_altivec_stvebx:
+ ID = Intrinsic::ppc_altivec_stvebx;
+ break;
+ case PPC::BI__builtin_altivec_stvehx:
+ ID = Intrinsic::ppc_altivec_stvehx;
+ break;
+ case PPC::BI__builtin_altivec_stvewx:
+ ID = Intrinsic::ppc_altivec_stvewx;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ }
+ }
+ return 0;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
new file mode 100644
index 0000000..5258779
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -0,0 +1,338 @@
+//===--- CGCXX.cpp - Emit LLVM Code for C++ code --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy.
+
+#include "CGCXXABI.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Determines whether the given function has a trivial body that does
+/// not require any specific codegen.
+static bool HasTrivialBody(const FunctionDecl *FD) {
+ Stmt *S = FD->getBody();
+ if (!S)
+ return true;
+ if (isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty())
+ return true;
+ return false;
+}
+
+/// Try to emit a base destructor as an alias to its primary
+/// base-class destructor.
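+///
+/// For example (illustrative):
+///   struct A { ~A(); };
+///   struct B : A { ~B() {} };
+/// B's base-object destructor can be emitted as an alias to A's, since B's
+/// body is trivial and A is its only base with a non-trivial destructor.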
+bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+ // If the destructor doesn't have a trivial body, we have to emit it
+ // separately.
+ if (!HasTrivialBody(D))
+ return true;
+
+ const CXXRecordDecl *Class = D->getParent();
+
+ // If we need to manipulate a VTT parameter, give up.
+ if (Class->getNumVBases()) {
+    // Extra Credit: passing extra parameters is perfectly safe
+    // in many calling conventions, so only bail out if the dtor's
+    // calling convention is nonstandard.
+ return true;
+ }
+
+ // If any fields have a non-trivial destructor, we have to emit it
+ // separately.
+ for (CXXRecordDecl::field_iterator I = Class->field_begin(),
+ E = Class->field_end(); I != E; ++I)
+ if (const RecordType *RT = (*I)->getType()->getAs<RecordType>())
+ if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor())
+ return true;
+
+ // Try to find a unique base class with a non-trivial destructor.
+ const CXXRecordDecl *UniqueBase = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+
+ // We're in the base destructor, so skip virtual bases.
+ if (I->isVirtual()) continue;
+
+ // Skip base classes with trivial destructors.
+ const CXXRecordDecl *Base
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (Base->hasTrivialDestructor()) continue;
+
+ // If we've already found a base class with a non-trivial
+ // destructor, give up.
+ if (UniqueBase) return true;
+ UniqueBase = Base;
+ }
+
+ // If we didn't find any bases with a non-trivial destructor, then
+ // the base destructor is actually effectively trivial, which can
+ // happen if it was needlessly user-defined or if there are virtual
+ // bases with non-trivial destructors.
+ if (!UniqueBase)
+ return true;
+
+  // If we don't have a definition for the destructor yet, don't
+  // emit. We can't emit aliases to declarations; that's just not
+  // how aliases work.
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor(getContext());
+ if (!BaseD->isImplicit() && !BaseD->getBody())
+ return true;
+
+ // If the base is at a non-zero offset, give up.
+ const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
+ if (ClassLayout.getBaseClassOffset(UniqueBase) != 0)
+ return true;
+
+ return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
+ GlobalDecl(BaseD, Dtor_Base));
+}
+
+/// Try to emit a definition as a global alias for another definition.
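+///
+/// On success the emitted IR looks roughly like (illustrative mangled names):
+///   @_ZN1AC1Ev = alias void (%struct.A*)* @_ZN1AC2Ev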
+bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
+ GlobalDecl TargetDecl) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+  // The alias will use the linkage of the referent. If we can't
+ // support aliases with that linkage, fail.
+ llvm::GlobalValue::LinkageTypes Linkage
+ = getFunctionLinkage(cast<FunctionDecl>(AliasDecl.getDecl()));
+
+ switch (Linkage) {
+ // We can definitely emit aliases to definitions with external linkage.
+ case llvm::GlobalValue::ExternalLinkage:
+ case llvm::GlobalValue::ExternalWeakLinkage:
+ break;
+
+ // Same with local linkage.
+ case llvm::GlobalValue::InternalLinkage:
+ case llvm::GlobalValue::PrivateLinkage:
+ case llvm::GlobalValue::LinkerPrivateLinkage:
+ break;
+
+ // We should try to support linkonce linkages.
+ case llvm::GlobalValue::LinkOnceAnyLinkage:
+ case llvm::GlobalValue::LinkOnceODRLinkage:
+ return true;
+
+ // Other linkages will probably never be supported.
+ default:
+ return true;
+ }
+
+ llvm::GlobalValue::LinkageTypes TargetLinkage
+ = getFunctionLinkage(cast<FunctionDecl>(TargetDecl.getDecl()));
+
+ if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
+ return true;
+
+ // Derive the type for the alias.
+ const llvm::PointerType *AliasType
+ = getTypes().GetFunctionType(AliasDecl)->getPointerTo();
+
+  // Find the referent. Some aliases might require a bitcast, in
+ // which case the caller is responsible for ensuring the soundness
+ // of these semantics.
+ llvm::GlobalValue *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
+ llvm::Constant *Aliasee = Ref;
+ if (Ref->getType() != AliasType)
+ Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+
+ // Create the alias with no name.
+ llvm::GlobalAlias *Alias =
+ new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
+
+ // Switch any previous uses to the alias.
+ MangleBuffer MangledName;
+ getMangledName(MangledName, AliasDecl);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ assert(Entry->isDeclaration() && "definition already exists for alias");
+ assert(Entry->getType() == AliasType &&
+ "declaration exists with different type");
+ Alias->takeName(Entry);
+ Entry->replaceAllUsesWith(Alias);
+ Entry->eraseFromParent();
+ } else {
+ Alias->setName(MangledName.getString());
+ }
+
+ // Finally, set up the alias with its proper name and attributes.
+ SetCommonAttributes(AliasDecl.getDecl(), Alias);
+
+ return false;
+}
+
+void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ // The constructor used for constructing this as a complete class;
+  // constructs the virtual bases, then calls the base constructor.
+ EmitGlobal(GlobalDecl(D, Ctor_Complete));
+
+ // The constructor used for constructing this as a base class;
+ // ignores virtual bases.
+ EmitGlobal(GlobalDecl(D, Ctor_Base));
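+
+  // (Illustrative note: in the Itanium ABI these two entry points are
+  // mangled as C1 and C2 respectively, e.g. _ZN1AC1Ev and _ZN1AC2Ev.)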
+}
+
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ // The complete constructor is equivalent to the base constructor
+ // for classes with no virtual bases. Try to emit it as an alias.
+ if (Type == Ctor_Complete &&
+ !D->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(D, Ctor_Complete),
+ GlobalDecl(D, Ctor_Base)))
+ return;
+
+ llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXConstructor(D, Type));
+ setFunctionLinkage(D, Fn);
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+llvm::GlobalValue *
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ MangleBuffer Name;
+ getMangledCXXCtorName(Name, D, Type);
+ if (llvm::GlobalValue *V = GetGlobalValue(Name))
+ return V;
+
+ const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type),
+ FPT->isVariadic());
+ return cast<llvm::Function>(
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
+}
+
+void CodeGenModule::getMangledName(MangleBuffer &Buffer, const BlockDecl *BD) {
+ getMangleContext().mangleBlock(BD, Buffer.getBuffer());
+}
+
+void CodeGenModule::getMangledCXXCtorName(MangleBuffer &Name,
+ const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ getMangleContext().mangleCXXCtor(D, Type, Name.getBuffer());
+}
+
+void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ // The destructor in a virtual table is always a 'deleting'
+ // destructor, which calls the complete destructor and then uses the
+ // appropriate operator delete.
+ if (D->isVirtual())
+ EmitGlobal(GlobalDecl(D, Dtor_Deleting));
+
+ // The destructor used for destructing this as a most-derived class;
+  // calls the base destructor and then destructs any virtual bases.
+ EmitGlobal(GlobalDecl(D, Dtor_Complete));
+
+ // The destructor used for destructing this as a base class; ignores
+ // virtual bases.
+ EmitGlobal(GlobalDecl(D, Dtor_Base));
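+
+  // (Illustrative note: the Itanium ABI mangles these variants as D0
+  // (deleting), D1 (complete) and D2 (base), e.g. _ZN1AD1Ev.)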
+}
+
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ // The complete destructor is equivalent to the base destructor for
+ // classes with no virtual bases, so try to emit it as an alias.
+ if (Type == Dtor_Complete &&
+ !D->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Complete),
+ GlobalDecl(D, Dtor_Base)))
+ return;
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (Type == Dtor_Base && !TryEmitBaseDestructorAsAlias(D))
+ return;
+
+ llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXDestructor(D, Type));
+ setFunctionLinkage(D, Fn);
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+llvm::GlobalValue *
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ MangleBuffer Name;
+ getMangledCXXDtorName(Name, D, Type);
+ if (llvm::GlobalValue *V = GetGlobalValue(Name))
+ return V;
+
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false);
+
+ return cast<llvm::Function>(
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
+}
+
+void CodeGenModule::getMangledCXXDtorName(MangleBuffer &Name,
+ const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ getMangleContext().mangleCXXDtor(D, Type, Name.getBuffer());
+}
+
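+// Loads the virtual function pointer out of the object's vtable. Roughly the
+// following IR is produced (an illustrative sketch; FnTy stands for the
+// function's LLVM type and the value names are invented):
+//   %vtable.ptr = bitcast %class* %this to FnTy***
+//   %vtable     = load FnTy*** %vtable.ptr
+//   %vfn        = getelementptr inbounds FnTy** %vtable, i64 VTableIndex
+//   %fn         = load FnTy** %vfn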
+static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
+ llvm::Value *This, const llvm::Type *Ty) {
+ Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo();
+
+ llvm::Value *VTable = CGF.Builder.CreateBitCast(This, Ty);
+ VTable = CGF.Builder.CreateLoad(VTable);
+
+ llvm::Value *VFuncPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ return CGF.Builder.CreateLoad(VFuncPtr);
+}
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+ const llvm::Type *Ty) {
+ MD = MD->getCanonicalDecl();
+ uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
+
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
+}
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *&This, const llvm::Type *Ty) {
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
+
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
+}
+
+CXXABI::~CXXABI() {}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h
new file mode 100644
index 0000000..1e6adb0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.h
@@ -0,0 +1,36 @@
+//===----- CGCXX.h - C++ related code CodeGen declarations ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These enums describe the C++ constructor and destructor variants used
+// to handle ABI-specific code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCXX_H
+#define CLANG_CODEGEN_CGCXX_H
+
+namespace clang {
+
+/// CXXCtorType - C++ constructor types
+enum CXXCtorType {
+ Ctor_Complete, // Complete object ctor
+ Ctor_Base, // Base object ctor
+ Ctor_CompleteAllocating // Complete object allocating ctor
+};
+
+/// CXXDtorType - C++ destructor types
+enum CXXDtorType {
+ Dtor_Deleting, // Deleting dtor
+ Dtor_Complete, // Complete object dtor
+ Dtor_Base // Base object dtor
+};
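+
+// For example (a sketch): given
+//
+//   struct A { ~A(); };
+//   struct B : virtual A { B(); ~B(); };
+//
+// B's complete variants construct/destroy the virtual A subobject
+// themselves, the base variants leave it to the most-derived class, and
+// the deleting destructor additionally invokes the appropriate
+// operator delete.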
+
+} // end namespace clang
+
+#endif // CLANG_CODEGEN_CGCXX_H
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
new file mode 100644
index 0000000..a7e1871
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -0,0 +1,37 @@
+//===----- CGCXXABI.h - Interface to C++ ABIs -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CXXABI_H
+#define CLANG_CODEGEN_CXXABI_H
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenModule;
+ class MangleContext;
+
+/// Implements C++ ABI-specific code generation functions.
+class CXXABI {
+public:
+ virtual ~CXXABI();
+
+ /// Gets the mangle context.
+ virtual MangleContext &getMangleContext() = 0;
+};
+
+/// Creates an instance of a C++ ABI class.
+CXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
new file mode 100644
index 0000000..73cee3c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -0,0 +1,1122 @@
+//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCall.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/Attributes.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Target/TargetData.h"
+
+#include "ABIInfo.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/***/
+
+// FIXME: Use iterator and sidestep silly type array creation.
+
+static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
+ switch (CC) {
+ default: return llvm::CallingConv::C;
+ case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
+ case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+ case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ }
+}
+
+/// Derives the 'this' type for codegen purposes, i.e. ignoring method
+/// qualification.
+/// FIXME: address space qualification?
+static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
+ QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
+}
+
+/// Returns the canonical formal type of the given C++ method.
+static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
+ return MD->getType()->getCanonicalTypeUnqualified()
+ .getAs<FunctionProtoType>();
+}
+
+/// Returns the "extra-canonicalized" return type, which discards
+/// qualifiers on the return type. Codegen doesn't care about them,
+/// and it makes the ABI code a little simpler when it can assume that
+/// all parameter and return types are top-level unqualified.
+static CanQualType GetReturnType(QualType RetTy) {
+ return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
+}
+
+const CGFunctionInfo &
+CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
+ return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
+ llvm::SmallVector<CanQualType, 16>(),
+ FTNP->getExtInfo());
+}
+
+/// \param ArgTys - contains any initial parameters besides those
+/// in the formal type
+static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ CanQual<FunctionProtoType> FTP) {
+ // FIXME: Kill copy.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+ CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
+ return CGT.getFunctionInfo(ResTy, ArgTys,
+ FTP->getExtInfo());
+}
+
+const CGFunctionInfo &
+CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ return ::getFunctionInfo(*this, ArgTys, FTP);
+}
+
+static CallingConv getCallingConventionForDecl(const Decl *D) {
+ // Set the appropriate calling convention for the Function.
+ if (D->hasAttr<StdCallAttr>())
+ return CC_X86StdCall;
+
+ if (D->hasAttr<FastCallAttr>())
+ return CC_X86FastCall;
+
+ if (D->hasAttr<ThisCallAttr>())
+ return CC_X86ThisCall;
+
+ return CC_C;
+}
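+
+// For example (a sketch): a declaration such as
+//
+//   void f(int) __attribute__((fastcall));
+//
+// yields CC_X86FastCall here, which ClangCallConvToLLVMCallConv above maps
+// to llvm::CallingConv::X86_FastCall.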
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+
+ // Add the 'this' pointer.
+ ArgTys.push_back(GetThisType(Context, RD));
+
+ return ::getFunctionInfo(*this, ArgTys,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+
+ // Add the 'this' pointer unless this is a static method.
+ if (MD->isInstance())
+ ArgTys.push_back(GetThisType(Context, MD->getParent()));
+
+ return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+
+ // Add the 'this' pointer.
+ ArgTys.push_back(GetThisType(Context, D->getParent()));
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+
+ return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+
+ // Add the 'this' pointer.
+ ArgTys.push_back(GetThisType(Context, D->getParent()));
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+
+ return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
+}
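+
+// For example (a sketch): given
+//
+//   struct A { };
+//   struct B : virtual A { B(); ~B(); };
+//
+// the base-object variants of B's constructor and destructor take an
+// extra 'void **' VTT parameter right after 'this', so the most-derived
+// object can hand down the correct vtable pointers; the complete
+// variants do not.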
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance())
+ return getFunctionInfo(MD);
+
+ CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+ assert(isa<FunctionType>(FTy));
+ if (isa<FunctionNoProtoType>(FTy))
+ return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
+ assert(isa<FunctionProtoType>(FTy));
+ return getFunctionInfo(FTy.getAs<FunctionProtoType>());
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
+ ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+ // FIXME: Kill copy?
+ for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
+ e = MD->param_end(); i != e; ++i) {
+ ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+ }
+ return getFunctionInfo(GetReturnType(MD->getResultType()),
+ ArgTys,
+ FunctionType::ExtInfo(
+ /*NoReturn*/ false,
+ /*RegParm*/ 0,
+ getCallingConventionForDecl(MD)));
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
+ // FIXME: Do we need to handle ObjCMethodDecl?
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ return getFunctionInfo(CD, GD.getCtorType());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
+ return getFunctionInfo(DD, GD.getDtorType());
+
+ return getFunctionInfo(FD);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const CallArgList &Args,
+ const FunctionType::ExtInfo &Info) {
+ // FIXME: Kill copy.
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i)
+ ArgTys.push_back(Context.getCanonicalParamType(i->second));
+ return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info) {
+ // FIXME: Kill copy.
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i)
+ ArgTys.push_back(Context.getCanonicalParamType(i->second));
+ return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ const FunctionType::ExtInfo &Info) {
+#ifndef NDEBUG
+ for (llvm::SmallVectorImpl<CanQualType>::const_iterator
+ I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
+ assert(I->isCanonicalAsParam());
+#endif
+
+ unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
+
+ // Lookup or create unique function info.
+ llvm::FoldingSetNodeID ID;
+ CGFunctionInfo::Profile(ID, Info, ResTy,
+ ArgTys.begin(), ArgTys.end());
+
+ void *InsertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
+ if (FI)
+ return *FI;
+
+ // Construct the function info.
+  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(),
+                          ResTy, ArgTys);
+ FunctionInfos.InsertNode(FI, InsertPos);
+
+ // Compute ABI information.
+ getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
+
+ return *FI;
+}
+
+CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
+ bool _NoReturn,
+ unsigned _RegParm,
+ CanQualType ResTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys)
+ : CallingConvention(_CallingConvention),
+ EffectiveCallingConvention(_CallingConvention),
+ NoReturn(_NoReturn), RegParm(_RegParm)
+{
+ NumArgs = ArgTys.size();
+ Args = new ArgInfo[1 + NumArgs];
+ Args[0].type = ResTy;
+ for (unsigned i = 0; i < NumArgs; ++i)
+ Args[1 + i].type = ArgTys[i];
+}
+
+/***/
+
+void CodeGenTypes::GetExpandedTypes(QualType Ty,
+ std::vector<const llvm::Type*> &ArgTys) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember() &&
+ "Cannot expand structure with flexible array.");
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+
+ QualType FT = FD->getType();
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ GetExpandedTypes(FT, ArgTys);
+ } else {
+ ArgTys.push_back(ConvertType(FT));
+ }
+ }
+}
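+
+// For example (a sketch): expanding
+//
+//   struct Point { int x; struct { float a, b; } inner; };
+//
+// recurses into 'inner' and yields the flat list { i32, float, float };
+// bit-fields and flexible array members are rejected by the asserts above.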
+
+llvm::Function::arg_iterator
+CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+ llvm::Function::arg_iterator AI) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
+ llvm::Value *Addr = LV.getAddress();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ AI = ExpandTypeFromArgs(FT, LV, AI);
+ } else {
+ EmitStoreThroughLValue(RValue::get(AI), LV, FT);
+ ++AI;
+ }
+ }
+
+ return AI;
+}
+
+void
+CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+ llvm::SmallVector<llvm::Value*, 16> &Args) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
+ } else {
+ RValue RV = EmitLoadOfLValue(LV, FT);
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+ Args.push_back(RV.getScalarVal());
+ }
+ }
+}
+
+/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
+/// a pointer to an object of type \arg Ty.
+///
+/// This safely handles the case when the src type is smaller than the
+/// destination type; in this situation the values of bits which are not
+/// present in the src are undefined.
+static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
+ const llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ const llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+ // If load is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+    // FIXME: Assert that we aren't truncating non-padding bits when we have
+    // access to that information.
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ return Load;
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ Store->setAlignment(1);
+ return CGF.Builder.CreateLoad(Tmp);
+ }
+}
+
+/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
+/// where the source and destination may have different types.
+///
+/// This safely handles the case when the src type is larger than the
+/// destination type; the upper bits of the src will be lost.
+static void CreateCoercedStore(llvm::Value *Src,
+ llvm::Value *DstPtr,
+ bool DstIsVolatile,
+ CodeGenFunction &CGF) {
+ const llvm::Type *SrcTy = Src->getType();
+ const llvm::Type *DstTy =
+ cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ // If store is legal, just bitcast the src pointer.
+ if (SrcSize <= DstSize) {
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+    // FIXME: Assert that we aren't truncating non-padding bits when we have
+    // access to that information.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
+ CGF.Builder.CreateStore(Src, Tmp);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
+ }
+}
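+
+// For example (a sketch): on x86-64, 'struct S { int a, b; }' is commonly
+// coerced to a single i64.  When the sizes line up, the conversion is just
+// a pointer bitcast plus a load or store; otherwise the value takes a
+// round trip through a stack temporary, as in the slow paths above.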
+
+/***/
+
+bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
+ return FI.getReturnInfo().isIndirect();
+}
+
+const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = getFunctionInfo(GD);
+
+ // For definition purposes, don't consider a K&R function variadic.
+ bool Variadic = false;
+ if (const FunctionProtoType *FPT =
+ cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
+ Variadic = FPT->isVariadic();
+
+ return GetFunctionType(FI, Variadic);
+}
+
+const llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
+ std::vector<const llvm::Type*> ArgTys;
+
+ const llvm::Type *ResultType = 0;
+
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ ResultType = ConvertType(RetTy);
+ break;
+
+ case ABIArgInfo::Indirect: {
+ assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
+ const llvm::Type *STy = ConvertType(RetTy);
+ ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
+ break;
+
+ case ABIArgInfo::Coerce:
+ ResultType = RetAI.getCoerceToType();
+ break;
+ }
+
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ const ABIArgInfo &AI = it->info;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce:
+ ArgTys.push_back(AI.getCoerceToType());
+ break;
+
+ case ABIArgInfo::Indirect: {
+      // Indirect arguments are always on the stack, which is address space #0.
+ const llvm::Type *LTy = ConvertTypeForMem(it->type);
+ ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ ArgTys.push_back(ConvertType(it->type));
+ break;
+
+ case ABIArgInfo::Expand:
+ GetExpandedTypes(it->type, ArgTys);
+ break;
+ }
+ }
+
+ return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
+}
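+
+// For example (a sketch): if the ABI returns 'struct Big' indirectly, then
+//
+//   struct Big f(int);
+//
+// lowers to 'void @f(%struct.Big*, i32)': the return slot becomes a leading
+// pointer argument (tagged 'sret' in ConstructAttributeList below) and the
+// IR result type becomes void.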
+
+static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
+ if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
+ if (!TT->getDecl()->isDefinition())
+ return true;
+ }
+
+ for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
+ if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
+ if (!TT->getDecl()->isDefinition())
+ return true;
+ }
+ }
+
+ return false;
+}
+
+const llvm::Type *
+CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
+ return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
+
+ return llvm::OpaqueType::get(getLLVMContext());
+}
+
+void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
+ const Decl *TargetDecl,
+ AttributeListType &PAL,
+ unsigned &CallingConv) {
+ unsigned FuncAttrs = 0;
+ unsigned RetAttrs = 0;
+
+ CallingConv = FI.getEffectiveCallingConvention();
+
+ if (FI.isNoReturn())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+
+ // FIXME: handle sseregparm someday...
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<NoThrowAttr>())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ if (TargetDecl->hasAttr<NoReturnAttr>())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+ if (TargetDecl->hasAttr<ConstAttr>())
+ FuncAttrs |= llvm::Attribute::ReadNone;
+ else if (TargetDecl->hasAttr<PureAttr>())
+ FuncAttrs |= llvm::Attribute::ReadOnly;
+ if (TargetDecl->hasAttr<MallocAttr>())
+ RetAttrs |= llvm::Attribute::NoAlias;
+ }
+
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs |= llvm::Attribute::OptimizeForSize;
+ if (CodeGenOpts.DisableRedZone)
+ FuncAttrs |= llvm::Attribute::NoRedZone;
+ if (CodeGenOpts.NoImplicitFloat)
+ FuncAttrs |= llvm::Attribute::NoImplicitFloat;
+
+ QualType RetTy = FI.getReturnType();
+ unsigned Index = 1;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (RetTy->isSignedIntegerType()) {
+ RetAttrs |= llvm::Attribute::SExt;
+ } else if (RetTy->isUnsignedIntegerType()) {
+ RetAttrs |= llvm::Attribute::ZExt;
+ }
+ // FALLTHROUGH
+ case ABIArgInfo::Direct:
+ break;
+
+ case ABIArgInfo::Indirect:
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ llvm::Attribute::StructRet));
+ ++Index;
+ // sret disables readnone and readonly
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::Coerce:
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ if (RetAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+
+ // FIXME: we need to honour command line settings also...
+  // FIXME: RegParm should be reduced in case of nested functions and/or
+  // global register variables.
+ signed RegParm = FI.getRegParm();
+
+ unsigned PointerWidth = getContext().Target.getPointerWidth(0);
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ QualType ParamType = it->type;
+ const ABIArgInfo &AI = it->info;
+ unsigned Attributes = 0;
+
+ // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
+ // have the corresponding parameter variable. It doesn't make
+    // sense to do it here because parameter handling is so convoluted.
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Coerce:
+ break;
+
+ case ABIArgInfo::Indirect:
+ if (AI.getIndirectByVal())
+ Attributes |= llvm::Attribute::ByVal;
+
+ Attributes |=
+ llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
+ // byval disables readnone and readonly.
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Extend:
+ if (ParamType->isSignedIntegerType()) {
+ Attributes |= llvm::Attribute::SExt;
+ } else if (ParamType->isUnsignedIntegerType()) {
+ Attributes |= llvm::Attribute::ZExt;
+ }
+      // FALLTHROUGH
+ case ABIArgInfo::Direct:
+ if (RegParm > 0 &&
+ (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ RegParm -=
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ if (RegParm >= 0)
+ Attributes |= llvm::Attribute::InReg;
+ }
+ // FIXME: handle sseregparm someday...
+ break;
+
+ case ABIArgInfo::Ignore:
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Expand: {
+ std::vector<const llvm::Type*> Tys;
+ // FIXME: This is rather inefficient. Do we ever actually need to do
+ // anything here? The result should be just reconstructed on the other
+ // side, so extension should be a non-issue.
+ getTypes().GetExpandedTypes(ParamType, Tys);
+ Index += Tys.size();
+ continue;
+ }
+ }
+
+ if (Attributes)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
+ ++Index;
+ }
+ if (FuncAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+}
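+
+// For example (a sketch): for 'struct Big f(short s)' with an indirect
+// return, the list built above contains roughly
+//
+//   index 1  -> StructRet   (the hidden return slot)
+//   index 2  -> SExt        (the promoted 'short' argument)
+//   index ~0 -> function-wide bits such as NoUnwind, when set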
+
+void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ // If this is an implicit-return-zero function, go ahead and
+ // initialize the return value. TODO: it might be nice to have
+ // a more general mechanism for this that didn't require synthesized
+ // return statements.
+ if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+ if (FD->hasImplicitReturnZero()) {
+ QualType RetTy = FD->getResultType().getUnqualifiedType();
+ const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+ llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
+ Builder.CreateStore(Zero, ReturnValue);
+ }
+ }
+
+ // FIXME: We no longer need the types from FunctionArgList; lift up and
+ // simplify.
+
+ // Emit allocs for param decls. Give the LLVM Argument nodes names.
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+
+ // Name the struct return argument.
+ if (CGM.ReturnTypeUsesSret(FI)) {
+ AI->setName("agg.result");
+ ++AI;
+ }
+
+ assert(FI.arg_size() == Args.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i, ++info_it) {
+ const VarDecl *Arg = i->first;
+ QualType Ty = info_it->type;
+ const ABIArgInfo &ArgI = info_it->info;
+
+ switch (ArgI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ llvm::Value* V = AI;
+ if (hasAggregateLLVMType(Ty)) {
+        // Do nothing; aggregates and complex variables are accessed by
+ // reference.
+ } else {
+ // Load scalar value from indirect argument.
+ V = EmitLoadOfScalar(V, false, Ty);
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value* V = AI;
+ if (hasAggregateLLVMType(Ty)) {
+ // Create a temporary alloca to hold the argument; the rest of
+ // codegen expects to access aggregates & complex values by
+ // reference.
+ V = CreateMemTemp(Ty);
+ Builder.CreateStore(AI, V);
+ } else {
+ if (Arg->getType().isRestrictQualified())
+ AI->addAttr(llvm::Attribute::NoAlias);
+
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
+ // FIXME: What are the right qualifiers here?
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
+ EmitParmDecl(*Arg, Temp);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Arg->getName() + "." + llvm::Twine(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty)) {
+ EmitParmDecl(*Arg, CreateMemTemp(Ty));
+ } else {
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+ }
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Coerce: {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
+ // result in a new alloca anyway, so we could just store into that
+ // directly if we broke the abstraction down more.
+ llvm::Value *V = CreateMemTemp(Ty, "coerce");
+ CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
+ // Match to what EmitParmDecl is expecting for this type.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ V = EmitLoadOfScalar(V, false, Ty);
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+ }
+
+ ++AI;
+ }
+ assert(AI == Fn->arg_end() && "Argument mismatch!");
+}
+
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
+ llvm::Value *ReturnValue) {
+ llvm::Value *RV = 0;
+
+ // Functions with no result always return void.
+ if (ReturnValue) {
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RetTy->isAnyComplexType()) {
+ ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+ StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+        // Do nothing; aggregates get evaluated directly into the destination.
+ } else {
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+ false, RetTy);
+ }
+ break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+      // The internal return value temp will always have
+ // pointer-to-return-type type.
+ RV = Builder.CreateLoad(ReturnValue);
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce:
+ RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+ }
+
+ if (RV) {
+ Builder.CreateRet(RV);
+ } else {
+ Builder.CreateRetVoid();
+ }
+}
+
+RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
+ // StartFunction converted the ABI-lowered parameter(s) into a
+ // local alloca. We need to turn that into an r-value suitable
+ // for EmitCall.
+ llvm::Value *Local = GetAddrOfLocalVar(Param);
+
+ QualType ArgType = Param->getType();
+
+ // For the most part, we just need to load the alloca, except:
+ // 1) aggregate r-values are actually pointers to temporaries, and
+ // 2) references to aggregates are pointers directly to the aggregate.
+ // I don't know why references to non-aggregates are different here.
+ if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
+ if (hasAggregateLLVMType(RefType->getPointeeType()))
+ return RValue::getAggregate(Local);
+
+ // Locals which are references to scalars are represented
+ // with allocas holding the pointer.
+ return RValue::get(Builder.CreateLoad(Local));
+ }
+
+ if (ArgType->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));
+
+ if (hasAggregateLLVMType(ArgType))
+ return RValue::getAggregate(Local);
+
+ return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
+}
+
+RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
+ if (ArgType->isReferenceType())
+ return EmitReferenceBindingToExpr(E);
+
+ return EmitAnyExprToTemp(E);
+}
+
+RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &CallArgs,
+ const Decl *TargetDecl,
+ llvm::Instruction **callOrInvoke) {
+ // FIXME: We no longer need the types from CallArgs; lift up and simplify.
+ llvm::SmallVector<llvm::Value*, 16> Args;
+
+ // Handle struct-return functions by passing a pointer to the
+ // location that we would like to return into.
+ QualType RetTy = CallInfo.getReturnType();
+ const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+
+ // If the call returns a temporary with struct return, create a temporary
+ // alloca to hold the result, unless one is given to us.
+ if (CGM.ReturnTypeUsesSret(CallInfo)) {
+ llvm::Value *Value = ReturnValue.getValue();
+ if (!Value)
+ Value = CreateMemTemp(RetTy);
+ Args.push_back(Value);
+ }
+
+ assert(CallInfo.arg_size() == CallArgs.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ I != E; ++I, ++info_it) {
+ const ABIArgInfo &ArgInfo = info_it->info;
+ RValue RV = I->first;
+
+ switch (ArgInfo.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RV.isScalar() || RV.isComplex()) {
+ // Make a temporary alloca to pass the argument.
+ Args.push_back(CreateMemTemp(I->second));
+ if (RV.isScalar())
+ EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
+ else
+ StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ } else {
+ Args.push_back(RV.getAggregateAddr());
+ }
+ break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ if (RV.isScalar()) {
+ Args.push_back(RV.getScalarVal());
+ } else if (RV.isComplex()) {
+ llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
+ Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
+ Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
+ Args.push_back(Tmp);
+ } else {
+ Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ }
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce: {
+ // FIXME: Avoid the conversion through memory if possible.
+ llvm::Value *SrcPtr;
+ if (RV.isScalar()) {
+ SrcPtr = CreateMemTemp(I->second, "coerce");
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
+ } else if (RV.isComplex()) {
+ SrcPtr = CreateMemTemp(I->second, "coerce");
+ StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
+ } else
+ SrcPtr = RV.getAggregateAddr();
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ *this));
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ ExpandTypeToArgs(I->second, RV, Args);
+ break;
+ }
+ }
+
+ // If the callee is a bitcast of a function to a varargs pointer to function
+ // type, check to see if we can remove the bitcast. This handles some cases
+ // with unprototyped functions.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
+ if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
+ const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
+ const llvm::FunctionType *CurFT =
+ cast<llvm::FunctionType>(CurPT->getElementType());
+ const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
+
+ if (CE->getOpcode() == llvm::Instruction::BitCast &&
+ ActualFT->getReturnType() == CurFT->getReturnType() &&
+ ActualFT->getNumParams() == CurFT->getNumParams() &&
+ ActualFT->getNumParams() == Args.size()) {
+ bool ArgsMatch = true;
+ for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
+ if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
+ ArgsMatch = false;
+ break;
+ }
+
+ // Strip the cast if we can get away with it. This is a nice cleanup,
+ // but also allows us to inline the function at -O0 if it is marked
+ // always_inline.
+ if (ArgsMatch)
+ Callee = CalleeF;
+ }
+ }
+
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+ unsigned CallingConv;
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.end());
+
+ llvm::CallSite CS;
+ if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
+ CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
+ } else {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
+ Args.data(), Args.data()+Args.size());
+ EmitBlock(Cont);
+ }
+ if (callOrInvoke) {
+ *callOrInvoke = CS.getInstruction();
+ }
+
+ CS.setAttributes(Attrs);
+ CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+
+ // If the call doesn't return, finish the basic block and clear the
+ // insertion point; this allows the rest of IRgen to discard
+ // unreachable code.
+ if (CS.doesNotReturn()) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+
+ // FIXME: For now, emit a dummy basic block because expr emitters in
+    // general are not ready to handle emitting expressions at unreachable
+ // points.
+ EnsureInsertPoint();
+
+ // Return a reasonable RValue.
+ return GetUndefRValue(RetTy);
+ }
+
+ llvm::Instruction *CI = CS.getInstruction();
+ if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
+ CI->setName("call");
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(Args[0]);
+ return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+ return RValue::getAggregate(DestPtr);
+ }
+ return RValue::get(CI);
+
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Coerce: {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
+ }
+
+ CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(DestPtr);
+ return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
+ }
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ assert(0 && "Unhandled ABIArgInfo::Kind");
+ return RValue::get(0);
+}
+
+/* VarArg handling */
+
+llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
+ return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
new file mode 100644
index 0000000..31c8aac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
@@ -0,0 +1,166 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCALL_H
+#define CLANG_CODEGEN_CGCALL_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Value.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
+
+#include "CGValue.h"
+
+// FIXME: Restructure so we don't have to expose so much stuff.
+#include "ABIInfo.h"
+
+namespace llvm {
+ struct AttributeWithIndex;
+ class Function;
+ class Type;
+ class Value;
+
+ template<typename T, unsigned> class SmallVector;
+}
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+ class FunctionDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ typedef llvm::SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+
+ /// CallArgList - Type for representing both the value and type of
+ /// arguments in a call.
+ typedef llvm::SmallVector<std::pair<RValue, QualType>, 16> CallArgList;
+
+ /// FunctionArgList - Type for representing both the decl and type
+ /// of parameters to a function. The decl must be either a
+ /// ParmVarDecl or ImplicitParamDecl.
+ typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
+ 16> FunctionArgList;
+
+ /// CGFunctionInfo - Class to encapsulate the information about a
+ /// function definition.
+ class CGFunctionInfo : public llvm::FoldingSetNode {
+ struct ArgInfo {
+ CanQualType type;
+ ABIArgInfo info;
+ };
+
+ /// The LLVM::CallingConv to use for this function (as specified by the
+ /// user).
+ unsigned CallingConvention;
+
+ /// The LLVM::CallingConv to actually use for this function, which may
+ /// depend on the ABI.
+ unsigned EffectiveCallingConvention;
+
+ /// Whether this function is noreturn.
+ bool NoReturn;
+
+ unsigned NumArgs;
+ ArgInfo *Args;
+
+ /// How many arguments to pass inreg.
+ unsigned RegParm;
+
+ public:
+ typedef const ArgInfo *const_arg_iterator;
+ typedef ArgInfo *arg_iterator;
+
+ CGFunctionInfo(unsigned CallingConvention,
+ bool NoReturn,
+ unsigned RegParm,
+ CanQualType ResTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys);
+ ~CGFunctionInfo() { delete[] Args; }
+
+ const_arg_iterator arg_begin() const { return Args + 1; }
+ const_arg_iterator arg_end() const { return Args + 1 + NumArgs; }
+ arg_iterator arg_begin() { return Args + 1; }
+ arg_iterator arg_end() { return Args + 1 + NumArgs; }
+
+ unsigned arg_size() const { return NumArgs; }
+
+ bool isNoReturn() const { return NoReturn; }
+
+ /// getCallingConvention - Return the user specified calling
+ /// convention.
+ unsigned getCallingConvention() const { return CallingConvention; }
+
+ /// getEffectiveCallingConvention - Return the actual calling convention to
+ /// use, which may depend on the ABI.
+ unsigned getEffectiveCallingConvention() const {
+ return EffectiveCallingConvention;
+ }
+ void setEffectiveCallingConvention(unsigned Value) {
+ EffectiveCallingConvention = Value;
+ }
+
+ unsigned getRegParm() const { return RegParm; }
+
+ CanQualType getReturnType() const { return Args[0].type; }
+
+ ABIArgInfo &getReturnInfo() { return Args[0].info; }
+ const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(getCallingConvention());
+ ID.AddBoolean(NoReturn);
+ ID.AddInteger(RegParm);
+ getReturnType().Profile(ID);
+ for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
+ it->type.Profile(ID);
+ }
+ template<class Iterator>
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const FunctionType::ExtInfo &Info,
+ CanQualType ResTy,
+ Iterator begin,
+ Iterator end) {
+ ID.AddInteger(Info.getCC());
+ ID.AddBoolean(Info.getNoReturn());
+ ID.AddInteger(Info.getRegParm());
+ ResTy.Profile(ID);
+ for (; begin != end; ++begin) {
+ CanQualType T = *begin; // force iterator to be over canonical types
+ T.Profile(ID);
+ }
+ }
+ };
+
+ /// ReturnValueSlot - Contains the address where the return value of a
+ /// function can be stored, and whether the address is volatile or not.
+ class ReturnValueSlot {
+ llvm::PointerIntPair<llvm::Value *, 1, bool> Value;
+
+ public:
+ ReturnValueSlot() {}
+ ReturnValueSlot(llvm::Value *Value, bool IsVolatile)
+ : Value(Value, IsVolatile) {}
+
+ bool isNull() const { return !getValue(); }
+
+ bool isVolatile() const { return Value.getInt(); }
+ llvm::Value *getValue() const { return Value.getPointer(); }
+ };
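+
+  // For example (a sketch): a caller that wants the result written straight
+  // into a known destination passes ReturnValueSlot(DestPtr, IsVolatile) to
+  // EmitCall; a default-constructed (null) slot makes EmitCall allocate a
+  // temporary for the result instead.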
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
new file mode 100644
index 0000000..bebea54
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -0,0 +1,1325 @@
+//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of classes
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtCXX.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static uint64_t
+ComputeNonVirtualBaseClassOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedClass,
+ CXXBaseSpecifierArray::iterator Start,
+ CXXBaseSpecifierArray::iterator End) {
+ uint64_t Offset = 0;
+
+ const CXXRecordDecl *RD = DerivedClass;
+
+ for (CXXBaseSpecifierArray::iterator I = Start; I != End; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ assert(!Base->isVirtual() && "Should not see virtual bases here!");
+
+ // Get the layout.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ // Add the offset.
+ Offset += Layout.getBaseClassOffset(BaseDecl);
+
+ RD = BaseDecl;
+ }
+
+ // FIXME: We should not use / 8 here.
+ return Offset / 8;
+}
+
+llvm::Constant *
+CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ const CXXBaseSpecifierArray &BasePath) {
+ assert(!BasePath.empty() && "Base path should not be empty!");
+
+ uint64_t Offset =
+ ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
+ BasePath.begin(), BasePath.end());
+ if (!Offset)
+ return 0;
+
+ const llvm::Type *PtrDiffTy =
+ Types.ConvertType(getContext().getPointerDiffType());
+
+ return llvm::ConstantInt::get(PtrDiffTy, Offset);
+}
+
+/// Gets the address of a direct base class within a complete object.
+/// This should only be used for (1) non-virtual bases or (2) virtual bases
+/// when the type is known to be complete (e.g. in complete destructors).
+///
+/// The object pointed to by 'This' is assumed to be non-null.
+llvm::Value *
+CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual) {
+ // 'this' must be a pointer (in some address space) to Derived.
+ assert(This->getType()->isPointerTy() &&
+ cast<llvm::PointerType>(This->getType())->getElementType()
+ == ConvertType(Derived));
+
+  // Compute the offset of the base, which may be virtual.
+ uint64_t Offset;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+ if (BaseIsVirtual)
+ Offset = Layout.getVBaseClassOffset(Base);
+ else
+ Offset = Layout.getBaseClassOffset(Base);
+
+ // Shift and cast down to the base type.
+ // TODO: for complete types, this should be possible with a GEP.
+ llvm::Value *V = This;
+ if (Offset) {
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ V = Builder.CreateBitCast(V, Int8PtrTy);
+ V = Builder.CreateConstInBoundsGEP1_64(V, Offset / 8);
+ }
+ V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
+
+ return V;
+}
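+
+// For example (a sketch): for 'struct D : A, B { ... }' where B lives at
+// offset 8, converting a D* to a B* emits a bitcast to i8*, a GEP by 8
+// bytes, and a bitcast to B*; a zero offset needs only the final bitcast.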
+
+static llvm::Value *
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
+ uint64_t NonVirtual, llvm::Value *Virtual) {
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Value *NonVirtualOffset = 0;
+ if (NonVirtual)
+ NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy, NonVirtual);
+
+ llvm::Value *BaseOffset;
+ if (Virtual) {
+ if (NonVirtualOffset)
+ BaseOffset = CGF.Builder.CreateAdd(Virtual, NonVirtualOffset);
+ else
+ BaseOffset = Virtual;
+ } else
+ BaseOffset = NonVirtualOffset;
+
+ // Apply the base offset.
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
+ ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
+
+ return ThisPtr;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXBaseSpecifierArray &BasePath,
+ bool NullCheckValue) {
+ assert(!BasePath.empty() && "Base path should not be empty!");
+
+ CXXBaseSpecifierArray::iterator Start = BasePath.begin();
+ const CXXRecordDecl *VBase = 0;
+
+ // Get the virtual base.
+ if ((*Start)->isVirtual()) {
+ VBase =
+ cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
+ ++Start;
+ }
+
+ uint64_t NonVirtualOffset =
+ ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
+ Start, BasePath.end());
+
+ // Get the base pointer type.
+ const llvm::Type *BasePtrTy =
+ ConvertType((BasePath.end()[-1])->getType())->getPointerTo();
+
+ if (!NonVirtualOffset && !VBase) {
+ // Just cast back.
+ return Builder.CreateBitCast(Value, BasePtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(Value,
+ llvm::Constant::getNullValue(Value->getType()));
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ llvm::Value *VirtualOffset = 0;
+
+ if (VBase)
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+
+ // Apply the offsets.
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
+ VirtualOffset);
+
+ // Cast back.
+ Value = Builder.CreateBitCast(Value, BasePtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
+ CastNull);
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXBaseSpecifierArray &BasePath,
+ bool NullCheckValue) {
+ assert(!BasePath.empty() && "Base path should not be empty!");
+
+ QualType DerivedTy =
+ getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ const llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
+
+ llvm::Value *NonVirtualOffset =
+ CGM.GetNonVirtualBaseClassOffset(Derived, BasePath);
+
+ if (!NonVirtualOffset) {
+ // No offset, we can just cast back.
+ return Builder.CreateBitCast(Value, DerivedPtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(Value,
+ llvm::Constant::getNullValue(Value->getType()));
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ // Apply the offset.
+ Value = Builder.CreatePtrToInt(Value, NonVirtualOffset->getType());
+ Value = Builder.CreateSub(Value, NonVirtualOffset);
+ Value = Builder.CreateIntToPtr(Value, DerivedPtrTy);
+
+ // Just cast.
+ Value = Builder.CreateBitCast(Value, DerivedPtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
+ CastNull);
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+/// GetVTTParameter - Return the VTT parameter that should be passed to a
+/// base constructor/destructor with virtual bases.
+static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
+ bool ForVirtualBase) {
+ if (!CodeGenVTables::needsVTTParameter(GD)) {
+ // This constructor/destructor does not need a VTT parameter.
+ return 0;
+ }
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(CGF.CurFuncDecl)->getParent();
+ const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ llvm::Value *VTT;
+
+ uint64_t SubVTTIndex;
+
+ // If the record matches the base, this is the complete ctor/dtor
+ // variant calling the base variant in a class with virtual bases.
+ if (RD == Base) {
+ assert(!CodeGenVTables::needsVTTParameter(CGF.CurGD) &&
+ "doing no-op VTT offset in base dtor/ctor?");
+ assert(!ForVirtualBase && "Can't have same class as virtual base!");
+ SubVTTIndex = 0;
+ } else {
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(RD);
+ uint64_t BaseOffset = ForVirtualBase ?
+ Layout.getVBaseClassOffset(Base) : Layout.getBaseClassOffset(Base);
+
+ SubVTTIndex =
+ CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
+ assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+ }
+
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
+ // A VTT parameter was passed to the constructor, use it.
+ VTT = CGF.LoadCXXVTT();
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
+ } else {
+ // We're the complete constructor, so get the VTT by name.
+ VTT = CGF.CGM.getVTables().getVTT(RD);
+ VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
+ }
+
+ return VTT;
+}
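+
+// For example (a sketch): given
+//
+//   struct A { };
+//   struct B : virtual A { B(); };
+//   struct C : B { C(); };
+//
+// C's complete constructor looks up B's sub-VTT inside C's VTT and passes
+// it to B's base constructor.  If the current function itself received a
+// VTT (i.e. it is a base variant), it indexes into that parameter instead
+// of referencing the global VTT directly.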
+
+static void EmitBaseInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXBaseOrMemberInitializer *BaseInit,
+ CXXCtorType CtorType) {
+ assert(BaseInit->isBaseInitializer() &&
+ "Must have base initializer!");
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+
+ const Type *BaseType = BaseInit->getBaseClass();
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+
+ bool isBaseVirtual = BaseInit->isBaseVirtual();
+
+ // The base constructor doesn't construct virtual bases.
+ if (CtorType == Ctor_Base && isBaseVirtual)
+ return;
+
+ // We can pretend to be a complete class because it only matters for
+ // virtual bases, and we only do virtual bases for complete ctors.
+ llvm::Value *V =
+ CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
+ BaseClassDecl,
+ BaseInit->isBaseVirtual());
+
+ CGF.EmitAggExpr(BaseInit->getInit(), V, false, false, true);
+
+ if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor()) {
+ // FIXME: Is this OK for C++0x delegating constructors?
+ CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+
+ CXXDestructorDecl *DD = BaseClassDecl->getDestructor(CGF.getContext());
+ CGF.EmitCXXDestructorCall(DD, Dtor_Base, isBaseVirtual, V);
+ }
+}
+
+static void EmitAggMemberInitializer(CodeGenFunction &CGF,
+ LValue LHS,
+ llvm::Value *ArrayIndexVar,
+ CXXBaseOrMemberInitializer *MemberInit,
+ QualType T,
+ unsigned Index) {
+ if (Index == MemberInit->getNumArrayIndices()) {
+ CodeGenFunction::CleanupScope Cleanups(CGF);
+
+ llvm::Value *Dest = LHS.getAddress();
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+ }
+
+ CGF.EmitAggExpr(MemberInit->getInit(), Dest,
+ LHS.isVolatileQualified(),
+ /*IgnoreResult*/ false,
+ /*IsInitializer*/ true);
+
+ return;
+ }
+
+ const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
+ assert(Array && "Array initialization without the array type?");
+ llvm::Value *IndexVar
+ = CGF.GetAddrOfLocalVar(MemberInit->getArrayIndex(Index));
+ assert(IndexVar && "Array index variable not loaded");
+
+ // Initialize this index variable to zero.
+ llvm::Value* Zero
+ = llvm::Constant::getNullValue(
+ CGF.ConvertType(CGF.getContext().getSizeType()));
+ CGF.Builder.CreateStore(Zero, IndexVar);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
+
+ CGF.EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
+ // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ uint64_t NumElements = Array->getSize().getZExtValue();
+ llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
+ llvm::Value *NumElementsPtr =
+ llvm::ConstantInt::get(Counter->getType(), NumElements);
+ llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
+ "isless");
+
+ // If the condition is true, execute the body.
+ CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ CGF.EmitBlock(ForBody);
+ llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
+
+ {
+ CodeGenFunction::CleanupScope Cleanups(CGF);
+
+ // Inside the loop body recurse to emit the inner loop or, eventually, the
+ // constructor call.
+ EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit,
+ Array->getElementType(), Index + 1);
+ }
+
+ CGF.EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+ Counter = CGF.Builder.CreateLoad(IndexVar);
+ NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
+ CGF.Builder.CreateStore(NextVal, IndexVar);
+
+ // Finally, branch back up to the condition for the next iteration.
+ CGF.EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ CGF.EmitBlock(AfterFor, true);
+}
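The recursion above peels off one array dimension per level, emitting one counted loop per dimension with the element construction at the innermost level. A hedged source-level sketch of the loop nest this produces for a two-dimensional array (the `Elem` type and raw buffer are hypothetical):

```cpp
#include <new>
#include <cstdio>

struct Elem {
  explicit Elem(int v) { std::printf("construct %d\n", v); }
};

int main() {
  // Raw storage standing in for a not-yet-constructed 2x3 array member.
  alignas(Elem) unsigned char buf[sizeof(Elem) * 2 * 3];
  Elem (*dst)[3] = reinterpret_cast<Elem (*)[3]>(buf);

  // One loop per array dimension (the "for.cond"/"for.body"/"for.inc"
  // blocks emitted above), with the constructor call innermost.
  for (unsigned i0 = 0; i0 != 2; ++i0)
    for (unsigned i1 = 0; i1 != 3; ++i1)
      ::new (&dst[i0][i1]) Elem(int(i0 * 3 + i1));
  return 0;
}
```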
+
+static void EmitMemberInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXBaseOrMemberInitializer *MemberInit,
+ const CXXConstructorDecl *Constructor,
+ FunctionArgList &Args) {
+ assert(MemberInit->isMemberInitializer() &&
+ "Must have member initializer!");
+
+ // Initialize the non-static data member.
+ FieldDecl *Field = MemberInit->getMember();
+ QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS;
+
+ // If we are initializing an anonymous union field, drill down to the field.
+ if (MemberInit->getAnonUnionMember()) {
+ Field = MemberInit->getAnonUnionMember();
+ LHS = CGF.EmitLValueForAnonRecordField(ThisPtr, Field, 0);
+ FieldType = Field->getType();
+ } else {
+ LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
+ }
+
+ // FIXME: If there's no initializer and the CXXBaseOrMemberInitializer
+ // was implicitly generated, we shouldn't be zeroing memory.
+ RValue RHS;
+ if (FieldType->isReferenceType()) {
+ RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(),
+ /*IsInitializer=*/true);
+ CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
+ } else if (FieldType->isArrayType() && !MemberInit->getInit()) {
+ CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
+ } else if (!CGF.hasAggregateLLVMType(Field->getType())) {
+ RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit(), true));
+ CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
+ } else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
+ LHS.isVolatileQualified());
+ } else {
+ llvm::Value *ArrayIndexVar = 0;
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isImplicit() &&
+ Constructor->isCopyConstructor()) {
+ const llvm::Type *SizeTy
+ = CGF.ConvertType(CGF.getContext().getSizeType());
+
+ // The LHS is a pointer to the first object we'll be constructing, as
+ // a flat array.
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
+ BasePtr);
+ LHS = LValue::MakeAddr(BaseAddrPtr, CGF.MakeQualifiers(BaseElementTy));
+
+ // Create an array index that will be used to walk over all of the
+ // objects we're constructing.
+ ArrayIndexVar = CGF.CreateTempAlloca(SizeTy, "object.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ CGF.Builder.CreateStore(Zero, ArrayIndexVar);
+
+ // If we are copying an array of scalars or classes with trivial copy
+ // constructors, perform a single aggregate copy.
+ const RecordType *Record = BaseElementTy->getAs<RecordType>();
+ if (!Record ||
+ cast<CXXRecordDecl>(Record->getDecl())->hasTrivialCopyConstructor()) {
+ // Find the source pointer. We know it's the last argument because
+ // we know we're in a copy constructor.
+ unsigned SrcArgIndex = Args.size() - 1;
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(Args[SrcArgIndex].first));
+ LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ return;
+ }
+
+ // Emit the block variables for the array indices, if any.
+ for (unsigned I = 0, N = MemberInit->getNumArrayIndices(); I != N; ++I)
+ CGF.EmitLocalBlockVarDecl(*MemberInit->getArrayIndex(I));
+ }
+
+ EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
+
+ if (!CGF.Exceptions)
+ return;
+
+ // FIXME: If we have an array of classes w/ non-trivial destructors,
+ // we need to destroy in reverse order of construction along the exception
+ // path.
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD->hasTrivialDestructor()) {
+ // FIXME: Is this OK for C++0x delegating constructors?
+ CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
+
+ CXXDestructorDecl *DD = RD->getDestructor(CGF.getContext());
+ CGF.EmitCXXDestructorCall(DD, Dtor_Complete, /*ForVirtualBase=*/false,
+ LHS.getAddress());
+ }
+ }
+}
+
+/// Checks whether the given constructor is a valid subject for the
+/// complete-to-base constructor delegation optimization, i.e.
+/// emitting the complete constructor as a simple call to the base
+/// constructor.
+static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
+
+ // Currently we disable the optimization for classes with virtual
+ // bases because (1) the addresses of parameter variables need to be
+ // consistent across all initializers but (2) the delegate function
+ // call necessarily creates a second copy of the parameter variable.
+ //
+ // The limiting example (purely theoretical AFAIK):
+ // struct A { A(int &c) { c++; } };
+ // struct B : virtual A {
+ // B(int count) : A(count) { printf("%d\n", count); }
+ // };
+ // ...although even this example could in principle be emitted as a
+ // delegation since the address of the parameter doesn't escape.
+ if (Ctor->getParent()->getNumVBases()) {
+ // TODO: white-list trivial vbase initializers. This case wouldn't
+ // be subject to the restrictions below.
+
+ // TODO: white-list cases where:
+ // - there are no non-reference parameters to the constructor
+ // - the initializers don't access any non-reference parameters
+ // - the initializers don't take the address of non-reference
+ // parameters
+ // - etc.
+ // If we ever add any of the above cases, remember that:
+ // - function-try-blocks will always blacklist this optimization
+ // - we need to perform the constructor prologue and cleanup in
+ // EmitConstructorBody.
+
+ return false;
+ }
+
+ // We also disable the optimization for variadic functions because
+ // it's impossible to "re-pass" varargs.
+ if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
+ return false;
+
+ return true;
+}
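Concretely, when the test above passes, the complete-object constructor (C1 in the Itanium mangling) is emitted as nothing but a forwarding call to the base-object constructor (C2). A hedged illustration of the two cases, with hypothetical classes:

```cpp
// Delegation applies: no virtual bases and not variadic, so the C1 and C2
// constructor variants do exactly the same work and C1 can forward to C2.
struct NoVBase {
  int x;
  explicit NoVBase(int x) : x(x) {}
};

// Delegation is disabled: the virtual base means the complete variant must
// also construct A, so the two variants genuinely differ.
struct A { int a; };
struct WithVBase : virtual A {
  explicit WithVBase(int) {}
};

int main() {
  NoVBase n(1);
  WithVBase w(2);
  return n.x;   // 1
}
```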
+
+/// EmitConstructorBody - Emits the body of the current constructor.
+void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
+ CXXCtorType CtorType = CurGD.getCtorType();
+
+ // Before we go any further, try the complete->base constructor
+ // delegation optimization.
+ if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
+ EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
+ return;
+ }
+
+ Stmt *Body = Ctor->getBody();
+
+ // Enter the function-try-block before the constructor prologue if
+ // applicable.
+ CXXTryStmtInfo TryInfo;
+ bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
+
+ if (IsTryBody)
+ TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
+
+ unsigned CleanupStackSize = CleanupEntries.size();
+
+ // Emit the constructor prologue, i.e. the base and member
+ // initializers.
+ EmitCtorPrologue(Ctor, CtorType, Args);
+
+ // Emit the body of the statement.
+ if (IsTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+
+ // Emit any cleanup blocks associated with the member or base
+ // initializers, which includes (along the exceptional path) the
+ // destructors for those members and bases that were fully
+ // constructed.
+ EmitCleanupBlocks(CleanupStackSize);
+
+ if (IsTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
+}
+
+/// EmitCtorPrologue - This routine generates the code needed to initialize
+/// the base classes and non-static data members of this constructor's class.
+void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
+ CXXCtorType CtorType,
+ FunctionArgList &Args) {
+ const CXXRecordDecl *ClassDecl = CD->getParent();
+
+ llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> MemberInitializers;
+
+ for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
+ E = CD->init_end();
+ B != E; ++B) {
+ CXXBaseOrMemberInitializer *Member = (*B);
+
+ assert(LiveTemporaries.empty() &&
+ "Should not have any live temporaries at initializer start!");
+
+ if (Member->isBaseInitializer())
+ EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
+ else
+ MemberInitializers.push_back(Member);
+ }
+
+ InitializeVTablePointers(ClassDecl);
+
+ for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) {
+ assert(LiveTemporaries.empty() &&
+ "Should not have any live temporaries at initializer start!");
+
+ EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
+ }
+}
+
+/// EmitDestructorBody - Emits the body of the current destructor.
+void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
+ CXXDtorType DtorType = CurGD.getDtorType();
+
+ Stmt *Body = Dtor->getBody();
+
+ // If the body is a function-try-block, enter the try before
+ // anything else --- unless we're in a deleting destructor, in which
+ // case we're just going to call the complete destructor and then
+ // call operator delete() on the way out.
+ CXXTryStmtInfo TryInfo;
+ bool isTryBody = (DtorType != Dtor_Deleting &&
+ Body && isa<CXXTryStmt>(Body));
+ if (isTryBody)
+ TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
+
+ llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue");
+ PushCleanupBlock(DtorEpilogue);
+
+ bool SkipBody = false; // should get jump-threaded
+
+ // If this is the deleting variant, just invoke the complete
+ // variant, then call the appropriate operator delete() on the way
+ // out.
+ if (DtorType == Dtor_Deleting) {
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ SkipBody = true;
+
+ // If this is the complete variant, just invoke the base variant;
+ // the epilogue will destruct the virtual bases. But we can't do
+ // this optimization if the body is a function-try-block, because
+ // we'd introduce *two* handler blocks.
+ } else if (!isTryBody && DtorType == Dtor_Complete) {
+ EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ SkipBody = true;
+
+ // Otherwise, we're in the base variant, so we need to ensure the
+ // vtable ptrs are right before emitting the body.
+ } else {
+ InitializeVTablePointers(Dtor->getParent());
+ }
+
+ // Emit the body of the statement.
+ if (SkipBody)
+ (void) 0;
+ else if (isTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+ else {
+ assert(Dtor->isImplicit() && "bodyless dtor not implicit");
+ // nothing to do besides what's in the epilogue
+ }
+
+ // Jump to the cleanup block.
+ CleanupBlockInfo Info = PopCleanupBlock();
+ assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
+ EmitBlock(DtorEpilogue);
+
+ // Emit the destructor epilogue now. If this is a complete
+ // destructor with a function-try-block, perform the base epilogue
+ // as well.
+ if (isTryBody && DtorType == Dtor_Complete)
+ EmitDtorEpilogue(Dtor, Dtor_Base);
+ EmitDtorEpilogue(Dtor, DtorType);
+
+ // Link up the cleanup information.
+ if (Info.SwitchBlock)
+ EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ EmitBlock(Info.EndBlock);
+
+ // Exit the try if applicable.
+ if (isTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
+}
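The control flow above mirrors the three Itanium destructor variants. A hedged pseudo-source sketch of that delegation structure (all function names below are hypothetical stand-ins, not Clang output):

```cpp
#include <cstdio>

struct T {};  // stand-in for a class with virtual bases

void destroy_members_and_nonvirtual_bases(T *) { std::puts("base epilogue"); }
void destroy_virtual_bases(T *)                { std::puts("vbase epilogue"); }

void T_base_dtor(T *thisp) {        // D2: vptrs reset, body, then epilogue
  destroy_members_and_nonvirtual_bases(thisp);
}
void T_complete_dtor(T *thisp) {    // D1: delegate to D2, then virtual bases
  T_base_dtor(thisp);
  destroy_virtual_bases(thisp);
}
void T_deleting_dtor(T *thisp) {    // D0: delegate to D1, then free storage
  T_complete_dtor(thisp);
  ::operator delete(thisp);
}

int main() {
  T_deleting_dtor(new T);
  return 0;
}
```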
+
+/// EmitDtorEpilogue - Emit all code that comes at the end of the class's
+/// destructor. This calls the destructors of members and base classes in
+/// reverse order of their construction.
+void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
+ CXXDtorType DtorType) {
+ assert(!DD->isTrivial() &&
+ "Should not emit dtor epilogue for trivial dtor!");
+
+ const CXXRecordDecl *ClassDecl = DD->getParent();
+
+ // In a deleting destructor, we've already called the complete
+ // destructor as a subroutine, so we just have to delete the
+ // appropriate value.
+ if (DtorType == Dtor_Deleting) {
+ assert(DD->getOperatorDelete() &&
+ "operator delete missing - EmitDtorEpilogue");
+ EmitDeleteCall(DD->getOperatorDelete(), LoadCXXThis(),
+ getContext().getTagDeclType(ClassDecl));
+ return;
+ }
+
+ // For complete destructors, we've already called the base
+ // destructor (in GenerateBody), so we just need to destruct all the
+ // virtual bases.
+ if (DtorType == Dtor_Complete) {
+ // Handle virtual bases.
+ for (CXXRecordDecl::reverse_base_class_const_iterator I =
+ ClassDecl->vbases_rbegin(), E = ClassDecl->vbases_rend();
+ I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+ const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
+ llvm::Value *V =
+ GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(),
+ ClassDecl, BaseClassDecl,
+ /*BaseIsVirtual=*/true);
+ EmitCXXDestructorCall(D, Dtor_Base, /*ForVirtualBase=*/true, V);
+ }
+ return;
+ }
+
+ assert(DtorType == Dtor_Base);
+
+ // Collect the fields.
+ llvm::SmallVector<const FieldDecl *, 16> FieldDecls;
+ for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *Field = *I;
+
+ QualType FieldType = getContext().getCanonicalType(Field->getType());
+ FieldType = getContext().getBaseElementType(FieldType);
+
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ if (!RT)
+ continue;
+
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (FieldClassDecl->hasTrivialDestructor())
+ continue;
+
+ FieldDecls.push_back(Field);
+ }
+
+ // Now destroy the fields.
+ for (size_t i = FieldDecls.size(); i > 0; --i) {
+ const FieldDecl *Field = FieldDecls[i - 1];
+
+ QualType FieldType = Field->getType();
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+
+ llvm::Value *ThisPtr = LoadCXXThis();
+
+ LValue LHS = EmitLValueForField(ThisPtr, Field,
+ // FIXME: Qualifiers?
+ /*CVRQualifiers=*/0);
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ Array, BaseAddrPtr);
+ } else
+ EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ LHS.getAddress());
+ }
+
+ // Destroy non-virtual bases.
+ for (CXXRecordDecl::reverse_base_class_const_iterator I =
+ ClassDecl->bases_rbegin(), E = ClassDecl->bases_rend(); I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+
+ // Ignore virtual bases.
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
+ llvm::Value *V =
+ GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(), ClassDecl,
+ BaseClassDecl,
+ /*BaseIsVirtual=*/false);
+
+ EmitCXXDestructorCall(D, Dtor_Base, /*ForVirtualBase=*/false, V);
+ }
+}
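The net effect is strict reverse-of-construction order: members are destroyed in reverse declaration order first, then the direct non-virtual bases in reverse order (virtual bases wait for the complete variant). A small runnable check:

```cpp
#include <cstdio>

struct Base1 { ~Base1() { std::puts("~Base1"); } };
struct Base2 { ~Base2() { std::puts("~Base2"); } };
struct Member {
  const char *name;
  explicit Member(const char *n) : name(n) {}
  ~Member() { std::printf("~%s\n", name); }
};

struct Derived : Base1, Base2 {
  Member m1{"m1"};
  Member m2{"m2"};
};

int main() {
  { Derived d; }  // prints ~m2, ~m1, ~Base2, ~Base1: exactly the reverse
                  // traversal the epilogue above performs
  return 0;
}
```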
+
+/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
+/// for-loop to call the default constructor on individual members of the
+/// array.
+/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
+/// array type and 'ArrayPtr' points to the beginning of the array.
+/// It is assumed that all relevant checks have been made by the caller.
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value * NumElements =
+ llvm::ConstantInt::get(SizeTy,
+ getContext().getConstantArrayElementCount(ArrayTy));
+
+ EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd);
+}
+
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the constructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter,
+ "arrayidx");
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
+
+ // Keep track of the current number of live temporaries.
+ {
+ CXXTemporariesCleanupScope Scope(*this);
+
+ EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address,
+ ArgBeg, ArgEnd);
+ }
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
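At the source level, the emitted loop is equivalent to indexing forward through the array and running one complete-object constructor per element, with default-argument temporaries destroyed each iteration. A hedged sketch (the `Widget` type and raw buffer are hypothetical):

```cpp
#include <new>
#include <cstddef>

struct Widget { Widget() {} };

// Sketch of the counted loop above: a size_t index starting at zero, a
// bounds test (for.cond), the ctor call (for.body), and the increment
// (for.inc).
void construct_array(Widget *ptr, std::size_t n) {
  for (std::size_t i = 0; i != n; ++i)
    ::new (ptr + i) Widget();   // complete-object constructor per element
}

int main() {
  alignas(Widget) unsigned char buf[sizeof(Widget) * 4];
  construct_array(reinterpret_cast<Widget *>(buf), 4);
  return 0;
}
```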
+
+/// EmitCXXAggrDestructorCall - calls the default destructor on array
+/// elements in reverse order of construction.
+void
+CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+ assert(CA && "Do we support VLA for destruction?");
+ uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
+
+ const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
+ llvm::Value* ElementCountPtr = llvm::ConstantInt::get(SizeLTy, ElementCount);
+ EmitCXXAggrDestructorCall(D, ElementCountPtr, This);
+}
+
+/// EmitCXXAggrDestructorCall - calls the default destructor on array
+/// elements in reverse order of construction.
+void
+CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ llvm::Value *UpperCount,
+ llvm::Value *This) {
+ const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
+ llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);
+
+ // Create a temporary for the loop index and initialize it with count of
+ // array elements.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeLTy, "loop.index");
+
+ // Store the number of elements in the index pointer.
+ Builder.CreateStore(UpperCount, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // Generate: if (loop-index != 0) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value* zeroConstant =
+ llvm::Constant::getNullValue(SizeLTy);
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
+ "isne");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsNE, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the destructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Counter = Builder.CreateSub(Counter, One);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Address);
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the decrement of the loop counter.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Counter = Builder.CreateSub(Counter, One, "dec");
+ Builder.CreateStore(Counter, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
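The destruction loop runs the opposite way: the counter starts at the element count, and each iteration destroys element `count - 1` down to `0`, matching reverse order of construction. A runnable sketch with hypothetical types:

```cpp
#include <new>
#include <cstddef>
#include <cstdio>

struct Widget {
  int id;
  explicit Widget(int id) : id(id) {}
  ~Widget() { std::printf("destroy %d\n", id); }
};

// Sketch of the count-down loop above: test counter != 0, destroy element
// (counter - 1), decrement, repeat.
void destroy_array(Widget *ptr, std::size_t n) {
  for (std::size_t i = n; i != 0; --i)
    ptr[i - 1].~Widget();
}

int main() {
  alignas(Widget) unsigned char buf[sizeof(Widget) * 3];
  Widget *w = reinterpret_cast<Widget *>(buf);
  for (int i = 0; i != 3; ++i) ::new (w + i) Widget(i);
  destroy_array(w, 3);   // prints: destroy 2, destroy 1, destroy 0
  return 0;
}
```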
+
+/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
+/// invoked, calls the default destructor on array elements in reverse order of
+/// construction.
+llvm::Constant *
+CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ FunctionArgList Args;
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ llvm::SmallString<16> Name;
+ llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount);
+ QualType R = getContext().VoidTy;
+ const CGFunctionInfo &FI
+ = CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
+ const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ Name.str(),
+ &CGM.getModule());
+ IdentifierInfo *II = &CGM.getContext().Idents.get(Name.str());
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, true);
+ StartFunction(FD, R, Fn, Args, SourceLocation());
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+ FinishFunction();
+ llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
+ 0);
+ llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
+ return m;
+}
+
+
+void
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ if (D->isTrivial()) {
+ if (ArgBeg == ArgEnd) {
+ // Trivial default constructor, no codegen required.
+ assert(D->isDefaultConstructor() &&
+ "trivial 0-arg ctor not a default ctor");
+ return;
+ }
+
+ assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(D->isCopyConstructor() && "trivial 1-arg ctor not a copy ctor");
+
+ const Expr *E = (*ArgBeg);
+ QualType Ty = E->getType();
+ llvm::Value *Src = EmitLValue(E).getAddress();
+ EmitAggregateCopy(This, Src, Ty);
+ return;
+ }
+
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type), ForVirtualBase);
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
+}
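The trivial-constructor fast paths amount to this: a trivial default construction emits no code at all, and a trivial copy construction collapses into a flat memory copy. A hedged sketch of what that lowering means at the source level (the `Pod` type and helper are illustrative, not Clang's output):

```cpp
#include <cstring>

struct Pod { int a; double b; };  // trivial default and copy constructors

// Stand-in for the EmitAggregateCopy fast path: a trivial copy construction
// is just a byte copy of the object representation.
void copy_construct_pod(Pod *dst, const Pod *src) {
  std::memcpy(dst, src, sizeof(Pod));
}

int main() {
  Pod a = {1, 2.0};
  Pod b;                     // trivial default ctor: no code emitted for it
  copy_construct_pod(&b, &a);
  return b.a == 1 ? 0 : 1;
}
```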
+
+void
+CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args) {
+ CallArgList DelegateArgs;
+
+ FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ assert(I != E && "no parameters to constructor");
+
+ // this
+ DelegateArgs.push_back(std::make_pair(RValue::get(LoadCXXThis()),
+ I->second));
+ ++I;
+
+ // vtt
+ if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType),
+ /*ForVirtualBase=*/false)) {
+ QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
+ DelegateArgs.push_back(std::make_pair(RValue::get(VTT), VoidPP));
+
+ if (CodeGenVTables::needsVTTParameter(CurGD)) {
+ assert(I != E && "cannot skip vtt parameter, already done with args");
+ assert(I->second == VoidPP && "skipping parameter not of vtt type");
+ ++I;
+ }
+ }
+
+ // Explicit arguments.
+ for (; I != E; ++I) {
+ const VarDecl *Param = I->first;
+ QualType ArgType = Param->getType(); // because we're passing it to itself
+ RValue Arg = EmitDelegateCallArg(Param);
+
+ DelegateArgs.push_back(std::make_pair(Arg, ArgType));
+ }
+
+ EmitCall(CGM.getTypes().getFunctionInfo(Ctor, CtorType),
+ CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
+ ReturnValueSlot(), DelegateArgs, Ctor);
+}
+
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ llvm::Value *This) {
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
+ ForVirtualBase);
+ llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+
+ EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
+}
+
+llvm::Value *
+CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ llvm::Value *VTablePtr = Builder.CreateBitCast(This,
+ Int8PtrTy->getPointerTo());
+ VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
+
+ int64_t VBaseOffsetOffset =
+ CGM.getVTables().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
+
+ llvm::Value *VBaseOffsetPtr =
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset, "vbase.offset.ptr");
+ const llvm::Type *PtrDiffTy =
+ ConvertType(getContext().getPointerDiffType());
+
+ VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
+ PtrDiffTy->getPointerTo());
+
+ llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+
+ return VBaseOffset;
+}
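The sequence above is: load the vptr from the object, step a statically known (and, under the Itanium ABI, negative) byte offset from the vtable's address point, and load the `ptrdiff_t` stored there. A hedged C++ rendering of the same pointer arithmetic, with a fabricated one-slot vtable so it runs standalone (nothing here is Clang API):

```cpp
#include <cstddef>

// Sketch of GetVirtualBaseClassOffset's pointer chasing under the Itanium
// layout, where vbase offsets sit at negative offsets from the address point.
std::ptrdiff_t load_vbase_offset(const void *object,
                                 std::ptrdiff_t vbase_offset_offset) {
  // 1. The first word of a dynamic object is its vtable pointer.
  const char *vtable = *static_cast<const char *const *>(object);
  // 2/3. Step vbase_offset_offset bytes and load the stored ptrdiff_t.
  return *reinterpret_cast<const std::ptrdiff_t *>(vtable +
                                                   vbase_offset_offset);
}

int main() {
  // Fake vtable: one ptrdiff_t slot just before the "address point".
  std::ptrdiff_t slots[2] = {16, 0};
  const void *address_point = &slots[1];
  const void *object = &address_point;  // its first word acts as the vptr
  std::ptrdiff_t off =
      load_vbase_offset(object, -std::ptrdiff_t(sizeof(std::ptrdiff_t)));
  return off == 16 ? 0 : 1;
}
```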
+
+void
+CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Compute the address point.
+ llvm::Value *VTableAddressPoint;
+
+ // Check if we need to use a vtable from the VTT.
+ if (CodeGenVTables::needsVTTParameter(CurGD) &&
+ (RD->getNumVBases() || NearestVBase)) {
+ // Get the secondary vpointer index.
+ uint64_t VirtualPointerIndex =
+ CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
+
+ // Load the VTT.
+ llvm::Value *VTT = LoadCXXVTT();
+ if (VirtualPointerIndex)
+ VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
+
+ // And load the address point from the VTT.
+ VTableAddressPoint = Builder.CreateLoad(VTT);
+ } else {
+ uint64_t AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
+ VTableAddressPoint =
+ Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
+ }
+
+ // Compute where to store the address point.
+ llvm::Value *VirtualOffset = 0;
+ uint64_t NonVirtualOffset = 0;
+
+ if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
+ // We need to use the virtual base offset offset because the virtual base
+ // might have a different offset in the most derived class.
+ VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
+ NearestVBase);
+ NonVirtualOffset = OffsetFromNearestVBase / 8;
+ } else {
+ // We can just use the base offset in the complete class.
+ NonVirtualOffset = Base.getBaseOffset() / 8;
+ }
+
+ // Apply the offsets.
+ llvm::Value *VTableField = LoadCXXThis();
+
+ if (NonVirtualOffset || VirtualOffset)
+ VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
+ NonVirtualOffset,
+ VirtualOffset);
+
+ // Finally, store the address point.
+ const llvm::Type *AddressPointPtrTy =
+ VTableAddressPoint->getType()->getPointerTo();
+ VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
+ Builder.CreateStore(VTableAddressPoint, VTableField);
+}
+
+void
+CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases) {
+ // If this base is a non-virtual primary base the address point has already
+ // been set.
+ if (!BaseIsNonVirtualPrimaryBase) {
+ // Initialize the vtable pointer for this base.
+ InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
+ VTable, VTableClass);
+ }
+
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Traverse bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore classes without a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ uint64_t BaseOffset;
+ uint64_t BaseOffsetFromNearestVBase;
+ bool BaseDeclIsNonVirtualPrimaryBase;
+
+ if (I->isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(VTableClass);
+
+ BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase = 0;
+ BaseDeclIsNonVirtualPrimaryBase = false;
+ } else {
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase =
+ OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
+ BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
+ }
+
+ InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
+ I->isVirtual() ? BaseDecl : NearestVBase,
+ BaseOffsetFromNearestVBase,
+ BaseDeclIsNonVirtualPrimaryBase,
+ VTable, VTableClass, VBases);
+ }
+}
+
+void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
+ // Ignore classes without a vtable.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Get the VTable.
+ llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);
+
+ // Initialize the vtable pointers for this class and all of its bases.
+ VisitedVirtualBasesSetTy VBases;
+ InitializeVTablePointers(BaseSubobject(RD, 0), /*NearestVBase=*/0,
+ /*OffsetFromNearestVBase=*/0,
+ /*BaseIsNonVirtualPrimaryBase=*/false,
+ VTable, RD, VBases);
+}
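Why every constructor and destructor variant re-runs this: virtual dispatch during construction and destruction must resolve to the class whose ctor/dtor is currently active, which is achieved precisely by having each variant install its own class's vtable pointers first. A runnable observation of that rule:

```cpp
#include <cstdio>

struct Base {
  Base() { whoami(); }               // dispatches through the vptr that the
  virtual ~Base() {}                 // Base ctor just installed
  virtual void whoami() { std::puts("Base"); }
};

struct Derived : Base {
  Derived() { whoami(); }            // by now the Derived vptr is installed
  void whoami() override { std::puts("Derived"); }
};

int main() {
  Derived d;   // prints "Base" then "Derived": each ctor first resets the
               // vtable pointers (InitializeVTablePointers above)
  return 0;
}
```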
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
new file mode 100644
index 0000000..c9bcb1b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -0,0 +1,1644 @@
+//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the debug information generation while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/System/Path.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace clang;
+using namespace clang::CodeGen;
+
+CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
+ : CGM(CGM), DebugFactory(CGM.getModule()),
+ FwdDeclCount(0), BlockLiteralGenericSet(false) {
+ CreateCompileUnit();
+}
+
+CGDebugInfo::~CGDebugInfo() {
+ assert(RegionStack.empty() && "Region stack mismatch, stack not empty!");
+}
+
+void CGDebugInfo::setLocation(SourceLocation Loc) {
+ if (Loc.isValid())
+ CurLoc = CGM.getContext().getSourceManager().getInstantiationLoc(Loc);
+}
+
+/// getContextDescriptor - Get context info for the decl.
+llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context,
+ llvm::DIDescriptor &CompileUnit) {
+ if (!Context)
+ return CompileUnit;
+
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+ I = RegionMap.find(Context);
+ if (I != RegionMap.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(I->second));
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl, CompileUnit));
+
+ if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) {
+ if (!RDecl->isDependentType()) {
+ llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ llvm::DIFile(CompileUnit));
+ return llvm::DIDescriptor(Ty);
+ }
+ }
+ return CompileUnit;
+}
+
+/// getFunctionName - Get function name for the given FunctionDecl. If the
+/// name is constructed on demand (e.g. C++ destructor) then the name
+/// is stored on the side.
+llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
+ assert (FD && "Invalid FunctionDecl!");
+ IdentifierInfo *FII = FD->getIdentifier();
+ if (FII)
+ return FII->getName();
+
+ // Otherwise construct human readable name for debug info.
+ std::string NS = FD->getNameAsString();
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(NS.length());
+ memcpy(StrPtr, NS.data(), NS.length());
+ return llvm::StringRef(StrPtr, NS.length());
+}
+
+/// getOrCreateFile - Get the file debug info descriptor for the input location.
+llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
+ if (!Loc.isValid())
+ // If Location is not valid then use main input file.
+ return DebugFactory.CreateFile(TheCU.getFilename(), TheCU.getDirectory(),
+ TheCU);
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+
+ // Cache the results.
+ const char *fname = PLoc.getFilename();
+ llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
+ DIFileCache.find(fname);
+
+ if (it != DIFileCache.end()) {
+ // Verify that the information still exists.
+ if (&*it->second)
+ return llvm::DIFile(cast<llvm::MDNode>(it->second));
+ }
+
+ // FIXME: We shouldn't even need to call 'makeAbsolute()' in the cases
+ // where we can consult the FileEntry.
+ llvm::sys::Path AbsFileName(PLoc.getFilename());
+ AbsFileName.makeAbsolute();
+
+ llvm::DIFile F = DebugFactory.CreateFile(AbsFileName.getLast(),
+ AbsFileName.getDirname(), TheCU);
+
+ DIFileCache[fname] = F;
+ return F;
+
+}
+
+/// getLineNumber - Get line number for the location. If location is invalid
+/// then use current location.
+unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
+ assert (CurLoc.isValid() && "Invalid current location!");
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.getLine();
+}
+
+/// getColumnNumber - Get column number for the location. If location is
+/// invalid then use current location.
+unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
+ assert (CurLoc.isValid() && "Invalid current location!");
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.getColumn();
+}
+
+/// CreateCompileUnit - Create new compile unit.
+void CGDebugInfo::CreateCompileUnit() {
+
+ // Get absolute path name.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ std::string MainFileName = CGM.getCodeGenOpts().MainFileName;
+ if (MainFileName.empty())
+ MainFileName = "<unknown>";
+
+ llvm::sys::Path AbsFileName(MainFileName);
+ AbsFileName.makeAbsolute();
+
+ // The main file name provided via the "-main-file-name" option contains just
+ // the file name itself with no path information. This file name may have had
+ // a relative path, so we look into the actual file entry for the main
+ // file to determine the real absolute path for the file.
+ std::string MainFileDir;
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID()))
+ MainFileDir = MainFile->getDir()->getName();
+ else
+ MainFileDir = AbsFileName.getDirname();
+
+ unsigned LangTag;
+ const LangOptions &LO = CGM.getLangOptions();
+ if (LO.CPlusPlus) {
+ if (LO.ObjC1)
+ LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
+ else
+ LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+ } else if (LO.ObjC1) {
+ LangTag = llvm::dwarf::DW_LANG_ObjC;
+ } else if (LO.C99) {
+ LangTag = llvm::dwarf::DW_LANG_C99;
+ } else {
+ LangTag = llvm::dwarf::DW_LANG_C89;
+ }
+
+ const char *Producer =
+#ifdef CLANG_VENDOR
+ CLANG_VENDOR
+#endif
+ "clang " CLANG_VERSION_STRING;
+
+ // Figure out which version of the ObjC runtime we have.
+ unsigned RuntimeVers = 0;
+ if (LO.ObjC1)
+ RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+
+ // Create new compile unit.
+ TheCU = DebugFactory.CreateCompileUnit(
+ LangTag, AbsFileName.getLast(), MainFileDir, Producer, true,
+ LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers);
+}
+
+/// CreateType - Get the Basic type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
+ llvm::DIFile Unit) {
+ unsigned Encoding = 0;
+ switch (BT->getKind()) {
+ default:
+ case BuiltinType::Void:
+ return llvm::DIType();
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
+ case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::Float:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
+ }
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(BT);
+ uint64_t Align = CGM.getContext().getTypeAlign(BT);
+ uint64_t Offset = 0;
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateBasicType(Unit,
+ BT->getName(CGM.getContext().getLangOptions()),
+ Unit, 0, Size, Align,
+ Offset, /*flags*/ 0, Encoding);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
+ llvm::DIFile Unit) {
+ // Bit size, align and offset of the type.
+ unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
+ if (Ty->isComplexIntegerType())
+ Encoding = llvm::dwarf::DW_ATE_lo_user;
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ uint64_t Offset = 0;
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateBasicType(Unit, "complex",
+ Unit, 0, Size, Align,
+ Offset, /*flags*/ 0, Encoding);
+ return DbgTy;
+}
+
+/// CreateQualifiedType - Get the qualified type from the cache or create
+/// a new one if necessary.
+llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
+ QualifierCollector Qc;
+ const Type *T = Qc.strip(Ty);
+
+ // Ignore these qualifiers for now.
+ Qc.removeObjCGCAttr();
+ Qc.removeAddressSpace();
+
+ // We will create one Derived type for one qualifier and recurse to handle any
+ // additional ones.
+ unsigned Tag;
+ if (Qc.hasConst()) {
+ Tag = llvm::dwarf::DW_TAG_const_type;
+ Qc.removeConst();
+ } else if (Qc.hasVolatile()) {
+ Tag = llvm::dwarf::DW_TAG_volatile_type;
+ Qc.removeVolatile();
+ } else if (Qc.hasRestrict()) {
+ Tag = llvm::dwarf::DW_TAG_restrict_type;
+ Qc.removeRestrict();
+ } else {
+ assert(Qc.empty() && "Unknown type qualifier for debug info");
+ return getOrCreateType(QualType(T, 0), Unit);
+ }
+
+ llvm::DIType FromTy = getOrCreateType(Qc.apply(T), Unit);
+
+ // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+ // CVR derived types.
+ llvm::DIType DbgTy =
+ DebugFactory.CreateDerivedType(Tag, Unit, "", Unit,
+ 0, 0, 0, 0, 0, FromTy);
+ return DbgTy;
+}
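The strategy is one derived-type node per qualifier, peeled outermost-first with a recursive call for the remainder. A hedged sketch of that peeling in ordinary C++, with plain flags standing in for `clang::Qualifiers` and printed strings standing in for the DWARF tag nodes:

```cpp
#include <cstdio>

struct Quals { bool Const, Volatile, Restrict; };

// Peel one qualifier per call, emit one "node" (here, a printf), and recurse
// on what is left - the same shape as CreateQualifiedType above.
void describe(Quals q, const char *base) {
  if (q.Const)         { std::printf("const -> ");    q.Const = false; }
  else if (q.Volatile) { std::printf("volatile -> "); q.Volatile = false; }
  else if (q.Restrict) { std::printf("restrict -> "); q.Restrict = false; }
  else                 { std::printf("%s\n", base);   return; }
  describe(q, base);
}

int main() {
  describe({true, true, false}, "int");  // prints: const -> volatile -> int
  return 0;
}
```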
+
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile Unit) {
+ llvm::DIType DbgTy =
+ CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
+ const Type *Ty,
+ QualType PointeeTy,
+ llvm::DIFile Unit) {
+ llvm::DIType EltTy = getOrCreateType(PointeeTy, Unit);
+
+ // Bit size, align and offset of the type.
+
+ // Size is always the size of a pointer. We can't use getTypeSize here
+ // because that does not return the correct value for references.
+ uint64_t Size =
+ CGM.getContext().Target.getPointerWidth(PointeeTy.getAddressSpace());
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return
+ DebugFactory.CreateDerivedType(Tag, Unit, "", Unit,
+ 0, Size, Align, 0, 0, EltTy);
+
+}
+
+llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
+ llvm::DIFile Unit) {
+ if (BlockLiteralGenericSet)
+ return BlockLiteralGeneric;
+
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+ llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+ llvm::DIType FieldTy;
+
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIArray Elements;
+ llvm::DIType EltTy, DescTy;
+
+ FieldOffset = 0;
+ FType = CGM.getContext().UnsignedLongTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
+
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ EltTys.clear();
+
+ unsigned Flags = llvm::DIType::FlagAppleBlock;
+ unsigned LineNo = getLineNumber(CurLoc);
+
+ EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
+ Unit, LineNo, FieldOffset, 0, 0,
+ Flags, llvm::DIType(), Elements);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "", Unit,
+ LineNo, Size, Align, 0, 0, EltTy);
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
+
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ FieldTy = DescTy;
+ FieldSize = CGM.getContext().getTypeSize(Ty);
+ FieldAlign = CGM.getContext().getTypeAlign(Ty);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__descriptor", Unit,
+ LineNo, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
+ Unit, LineNo, FieldOffset, 0, 0,
+ Flags, llvm::DIType(), Elements);
+
+ BlockLiteralGenericSet = true;
+ BlockLiteralGeneric
+ = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
+ "", Unit,
+ LineNo, Size, Align, 0, 0, EltTy);
+ return BlockLiteralGeneric;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
+ llvm::DIFile Unit) {
+ // Typedefs are derived from some other type. If we have a typedef of a
+ // typedef, make sure to emit the whole chain.
+ llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
+
+ // We don't set size information, but do specify where the typedef was
+ // declared.
+ unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
+
+ llvm::DIDescriptor TyContext
+ = getContextDescriptor(dyn_cast<Decl>(Ty->getDecl()->getDeclContext()),
+ Unit);
+ llvm::DIType DbgTy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef,
+ TyContext,
+ Ty->getDecl()->getName(), Unit,
+ Line, 0, 0, 0, 0, Src);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
+ llvm::DIFile Unit) {
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ // Add the result type at least.
+ EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
+
+ // Set up remainder of arguments if there is a prototype.
+ // FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
+ if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+ } else {
+ // FIXME: Handle () case in C. llvm-gcc doesn't do it either.
+ }
+
+ llvm::DIArray EltTypeArray =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0,
+ llvm::DIType(), EltTypeArray);
+ return DbgTy;
+}
+
+/// CollectRecordFields - A helper function to collect debug info for
+/// record fields. This is used while creating debug info entry for a Record.
+void CGDebugInfo::
+CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ unsigned FieldNo = 0;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ FieldDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+ llvm::StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields. Do not ignore unnamed records.
+ if (FieldName.empty() && !isa<RecordType>(Field->getType()))
+ continue;
+
+ // Get the location for the field.
+ llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ Expr *BitWidth = Field->getBitWidth();
+ if (BitWidth)
+ FieldSize = BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+ FieldAlign = CGM.getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+ unsigned Flags = 0;
+ AccessSpecifier Access = I->getAccess();
+ if (Access == clang::AS_private)
+ Flags |= llvm::DIType::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ Flags |= llvm::DIType::FlagProtected;
+
+ // Create a DW_TAG_member node to remember the offset of this field in the
+ // struct. FIXME: This is an absolutely insane way to capture this
+ // information. When we gut debug info, this should be fixed.
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
+ EltTys.push_back(FieldTy);
+ }
+}
+
+/// getOrCreateMethodType - CXXMethodDecl's type is a FunctionType. This
+/// function type does not include the implicit "this" pointer. Use this
+/// routine to get a method type which includes the "this" pointer.
+llvm::DIType
+CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile Unit) {
+ llvm::DIType FnTy
+ = getOrCreateType(QualType(Method->getType()->getAs<FunctionProtoType>(),
+ 0),
+ Unit);
+
+ // Static methods do not need "this" pointer argument.
+ if (Method->isStatic())
+ return FnTy;
+
+ // Add "this" pointer.
+
+ llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
+ assert (Args.getNumElements() && "Invalid number of arguments!");
+
+ llvm::SmallVector<llvm::DIDescriptor, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ Elts.push_back(Args.getElement(0));
+
+ // "this" pointer is always first argument.
+ ASTContext &Context = CGM.getContext();
+ QualType ThisPtr =
+ Context.getPointerType(Context.getTagDeclType(Method->getParent()));
+ llvm::DIType ThisPtrType =
+ DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ Elts.push_back(ThisPtrType);
+
+ // Copy rest of the arguments.
+ for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
+ Elts.push_back(Args.getElement(i));
+
+ llvm::DIArray EltTypeArray =
+ DebugFactory.GetOrCreateArray(Elts.data(), Elts.size());
+
+ return
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0,
+ llvm::DIType(), EltTypeArray);
+}
+
+/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
+/// a single member function GlobalDecl.
+llvm::DISubprogram
+CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile Unit,
+ llvm::DICompositeType &RecordTy) {
+ bool IsCtorOrDtor =
+ isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
+
+ llvm::StringRef MethodName = getFunctionName(Method);
+ llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
+
+ // Since a single ctor/dtor corresponds to multiple functions, it doesn't
+ // make sense to give a single ctor/dtor a linkage name.
+ MangleBuffer MethodLinkageName;
+ if (!IsCtorOrDtor)
+ CGM.getMangledName(MethodLinkageName, Method);
+
+ // Get the location for the method.
+ llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation());
+ unsigned MethodLine = getLineNumber(Method->getLocation());
+
+ // Collect virtual method info.
+ llvm::DIType ContainingType;
+ unsigned Virtuality = 0;
+ unsigned VIndex = 0;
+
+ if (Method->isVirtual()) {
+ if (Method->isPure())
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
+ else
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
+
+ // It doesn't make sense to give a virtual destructor a vtable index,
+ // since a single destructor has two entries in the vtable.
+ if (!isa<CXXDestructorDecl>(Method))
+ VIndex = CGM.getVTables().getMethodVTableIndex(Method);
+ ContainingType = RecordTy;
+ }
+
+ llvm::DISubprogram SP =
+ DebugFactory.CreateSubprogram(RecordTy , MethodName, MethodName,
+ MethodLinkageName,
+ MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+ Method->isThisDeclarationADefinition(),
+ Virtuality, VIndex, ContainingType);
+
+ // Don't cache ctors or dtors since we have to emit multiple functions for
+ // a single ctor or dtor.
+ if (!IsCtorOrDtor && Method->isThisDeclarationADefinition())
+ SPCache[Method] = llvm::WeakVH(SP);
+
+ return SP;
+}
+
+/// CollectCXXMemberFunctions - A helper function to collect debug info for
+/// C++ member functions. This is used while creating a debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DICompositeType &RecordTy) {
+ for(CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *Method = *I;
+
+ if (Method->isImplicit() && !Method->isUsed())
+ continue;
+
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ }
+}
+
+/// CollectCXXBases - A helper function to collect debug info for
+/// C++ base classes. This is used while creating debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DICompositeType &RecordTy) {
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
+ BE = RD->bases_end(); BI != BE; ++BI) {
+ unsigned BFlags = 0;
+ uint64_t BaseOffset;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(BI->getType()->getAs<RecordType>()->getDecl());
+
+ if (BI->isVirtual()) {
+ // The virtual base offset offset is negative, but the code generator
+ // emits a DWARF expression that expects a positive number.
+ BaseOffset = 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base);
+ BFlags = llvm::DIType::FlagVirtual;
+ } else
+ BaseOffset = RL.getBaseClassOffset(Base);
+
+ AccessSpecifier Access = BI->getAccessSpecifier();
+ if (Access == clang::AS_private)
+ BFlags |= llvm::DIType::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ BFlags |= llvm::DIType::FlagProtected;
+
+ llvm::DIType DTy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
+ RecordTy, llvm::StringRef(),
+ Unit, 0, 0, 0,
+ BaseOffset, BFlags,
+ getOrCreateType(BI->getType(),
+ Unit));
+ EltTys.push_back(DTy);
+ }
+}
+
+/// getOrCreateVTablePtrType - Return debug info descriptor for vtable.
+llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
+ if (VTablePtrType.isValid())
+ return VTablePtrType;
+
+ ASTContext &Context = CGM.getContext();
+
+ /* Function type */
+ llvm::DIDescriptor STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DIArray SElements = DebugFactory.GetOrCreateArray(&STy, 1);
+ llvm::DIType SubTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0, llvm::DIType(), SElements);
+
+ unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
+ llvm::DIType vtbl_ptr_type
+ = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "__vtbl_ptr_type", Unit,
+ 0, Size, 0, 0, 0, SubTy);
+
+ VTablePtrType =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "", Unit,
+ 0, Size, 0, 0, 0, vtbl_ptr_type);
+ return VTablePtrType;
+}
+
+/// getVTableName - Get vtable name for the given Class.
+llvm::StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
+ // Construct a gdb-compatible name.
+ std::string Name = "_vptr$" + RD->getNameAsString();
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(Name.length());
+ memcpy(StrPtr, Name.data(), Name.length());
+ return llvm::StringRef(StrPtr, Name.length());
+}
+
+/// CollectVTableInfo - If the C++ class has vtable info then insert appropriate
+/// debug info entry in EltTys vector.
+void CGDebugInfo::
+CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+
+ // If there is a primary base then it will hold vtable info.
+ if (RL.getPrimaryBase())
+ return;
+
+ // If this class is not dynamic then there is no vtable info to collect.
+ if (!RD->isDynamicClass())
+ return;
+
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ llvm::DIType VPTR
+ = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ getVTableName(RD), Unit,
+ 0, Size, 0, 0, 0,
+ getOrCreateVTablePtrType(Unit));
+ EltTys.push_back(VPTR);
+}
+
+/// CreateType - get structure or union type.
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
+ llvm::DIFile Unit) {
+ RecordDecl *RD = Ty->getDecl();
+
+ unsigned Tag;
+ if (RD->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (RD->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else {
+ assert(RD->isClass() && "Unknown RecordType!");
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+
+ // Records, classes, and unions can all be recursive. To handle them, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
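+ //
+ // For example, with "struct Node { struct Node *next; };" the forward
+ // declaration lets the debug info for the "next" field refer back to the
+ // node type while it is still being built.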
+
+ // RD->getName() is not unique. However, the debug info descriptors are
+ // uniqued, so use a generated type name to ensure uniqueness.
+ llvm::SmallString<128> FwdDeclName;
+ llvm::raw_svector_ostream(FwdDeclName) << "fwd.type." << FwdDeclCount++;
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, FDContext, FwdDeclName,
+ DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray());
+
+ // If this is just a forward declaration, return it.
+ if (!RD->getDefinition())
+ return FwdDecl;
+
+ llvm::MDNode *MN = FwdDecl;
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+ // Push the struct on region stack.
+ RegionStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ if (CXXDecl) {
+ CollectCXXBases(CXXDecl, Unit, EltTys, FwdDecl);
+ CollectVTableInfo(CXXDecl, Unit, EltTys);
+ }
+ CollectRecordFields(RD, Unit, EltTys);
+ llvm::MDNode *ContainingType = NULL;
+ if (CXXDecl) {
+ CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
+
+ // A class's primary base or the class itself contains the vtable.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase())
+ ContainingType =
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
+ else if (CXXDecl->isDynamicClass())
+ ContainingType = FwdDecl;
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ RegionStack.pop_back();
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
+ RegionMap.find(Ty->getDecl());
+ if (RI != RegionMap.end())
+ RegionMap.erase(RI);
+
+ llvm::DIDescriptor RDContext =
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+ llvm::DICompositeType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, RDContext,
+ RD->getName(),
+ DefUnit, Line, Size, Align, 0, 0,
+ llvm::DIType(), Elements,
+ 0, ContainingType);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+ RegionMap[RD] = llvm::WeakVH(RealDecl);
+ return RealDecl;
+}
+
+/// CreateType - get objective-c object type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
+ llvm::DIFile Unit) {
+ // Ignore protocols.
+ return getOrCreateType(Ty->getBaseType(), Unit);
+}
+
+/// CreateType - get objective-c interface type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
+ llvm::DIFile Unit) {
+ ObjCInterfaceDecl *ID = Ty->getDecl();
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
+ unsigned Line = getLineNumber(ID->getLocation());
+ unsigned RuntimeLang = TheCU.getLanguage();
+
+ // To handle a recursive interface, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(),
+ DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray(),
+ RuntimeLang);
+
+ // If this is just a forward declaration, return it.
+ if (ID->isForwardDecl())
+ return FwdDecl;
+
+ llvm::MDNode *MN = FwdDecl;
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+ // Push the struct on region stack.
+ RegionStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ ObjCInterfaceDecl *SClass = ID->getSuperClass();
+ if (SClass) {
+ llvm::DIType SClassTy =
+ getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ llvm::DIType InhTag =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
+ Unit, "", Unit, 0, 0, 0,
+ 0 /* offset */, 0, SClassTy);
+ EltTys.push_back(InhTag);
+ }
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
+
+ unsigned FieldNo = 0;
+ for (ObjCInterfaceDecl::ivar_iterator I = ID->ivar_begin(),
+ E = ID->ivar_end(); I != E; ++I, ++FieldNo) {
+ ObjCIvarDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+ llvm::StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ Expr *BitWidth = Field->getBitWidth();
+ if (BitWidth)
+ FieldSize = BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+ FieldAlign = CGM.getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+ unsigned Flags = 0;
+ if (Field->getAccessControl() == ObjCIvarDecl::Protected)
+ Flags = llvm::DIType::FlagProtected;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Private)
+ Flags = llvm::DIType::FlagPrivate;
+
+ // Create a DW_TAG_member node to remember the offset of this field in the
+ // struct. FIXME: This is an absolutely insane way to capture this
+ // information. When we gut debug info, this should be fixed.
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ RegionStack.pop_back();
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
+ RegionMap.find(Ty->getDecl());
+ if (RI != RegionMap.end())
+ RegionMap.erase(RI);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ llvm::DICompositeType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(), DefUnit,
+ Line, Size, Align, 0, 0, llvm::DIType(),
+ Elements, RuntimeLang);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+ RegionMap[ID] = llvm::WeakVH(RealDecl);
+
+ return RealDecl;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
+ llvm::DIFile Unit) {
+ EnumDecl *ED = Ty->getDecl();
+
+ llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
+
+ // Create DIEnumerator elements for each enumerator.
+ for (EnumDecl::enumerator_iterator
+ Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
+ Enum != EnumEnd; ++Enum) {
+ Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getName(),
+ Enum->getInitVal().getZExtValue()));
+ }
+
+ // Return a CompositeType for the enum itself.
+ llvm::DIArray EltArray =
+ DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
+
+ llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+
+ // Size and align of the type.
+ uint64_t Size = 0;
+ unsigned Align = 0;
+ if (!Ty->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(Ty);
+ Align = CGM.getContext().getTypeAlign(Ty);
+ }
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
+ Unit, ED->getName(), DefUnit, Line,
+ Size, Align, 0, 0,
+ llvm::DIType(), EltArray);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
+ llvm::DIFile Unit) {
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return CreateType(RT, Unit);
+ else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
+ return CreateType(ET, Unit);
+
+ return llvm::DIType();
+}
+
+llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
+ llvm::DIFile Unit) {
+ llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
+ uint64_t NumElems = Ty->getNumElements();
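+ // A DWARF subrange is a closed interval, so an N-element vector is
+ // described by the subrange [0, N-1].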
+ if (NumElems > 0)
+ --NumElems;
+
+ llvm::DIDescriptor Subscript = DebugFactory.GetOrCreateSubrange(0, NumElems);
+ llvm::DIArray SubscriptArray = DebugFactory.GetOrCreateArray(&Subscript, 1);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_vector_type,
+ Unit, "", Unit,
+ 0, Size, Align, 0, 0,
+ ElementTy, SubscriptArray);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
+ llvm::DIFile Unit) {
+ uint64_t Size;
+ uint64_t Align;
+
+ // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ Size = 0;
+ Align =
+ CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ } else if (Ty->isIncompleteArrayType()) {
+ Size = 0;
+ Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ } else {
+ // Size and align of the whole array, not the element type.
+ Size = CGM.getContext().getTypeSize(Ty);
+ Align = CGM.getContext().getTypeAlign(Ty);
+ }
+
+ // Add the dimensions of the array. FIXME: This loses CV qualifiers from
+ // interior arrays; do we care? Why aren't nested arrays represented the
+ // obvious/recursive way?
+ llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+ QualType EltTy(Ty, 0);
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ uint64_t Upper = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ if (CAT->getSize().getZExtValue())
+ Upper = CAT->getSize().getZExtValue() - 1;
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
+ EltTy = Ty->getElementType();
+ }
+
+ llvm::DIArray SubscriptArray =
+ DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+
+ llvm::DIType DbgTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type,
+ Unit, "", Unit,
+ 0, Size, Align, 0, 0,
+ getOrCreateType(EltTy, Unit),
+ SubscriptArray);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type,
+ Ty, Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
+ llvm::DIFile U) {
+ QualType PointerDiffTy = CGM.getContext().getPointerDiffType();
+ llvm::DIType PointerDiffDITy = getOrCreateType(PointerDiffTy, U);
+
+ if (!Ty->getPointeeType()->isFunctionType()) {
+ // We have a data member pointer type.
+ return PointerDiffDITy;
+ }
+
+ // We have a member function pointer type. Treat it as a struct with two
+ // ptrdiff_t members.
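+ // (In the Itanium C++ ABI this pair is {ptr, adj}: the function pointer or
+ // virtual table offset plus a this-pointer adjustment.)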
+ std::pair<uint64_t, unsigned> Info = CGM.getContext().getTypeInfo(Ty);
+
+ uint64_t FieldOffset = 0;
+ llvm::DIDescriptor ElementTypes[2];
+
+ // FIXME: This should probably be a function type instead.
+ ElementTypes[0] =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
+ "ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
+ FieldOffset += Info.first;
+
+ ElementTypes[1] =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
+ "ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(&ElementTypes[0],
+ llvm::array_lengthof(ElementTypes));
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ U, llvm::StringRef("test"),
+ U, 0, FieldOffset,
+ 0, 0, 0, llvm::DIType(), Elements);
+}
+
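+/// UnwrapTypeForDebugInfo - Strip off type sugar (typeof, decltype,
+/// elaborated and substituted template types) so that, for example,
+/// "typeof(x)" is described by the debug info for x's underlying type.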
+static QualType UnwrapTypeForDebugInfo(QualType T) {
+ do {
+ QualType LastT = T;
+ switch (T->getTypeClass()) {
+ default:
+ return T;
+ case Type::TemplateSpecialization:
+ T = cast<TemplateSpecializationType>(T)->desugar();
+ break;
+ case Type::TypeOfExpr: {
+ TypeOfExprType *Ty = cast<TypeOfExprType>(T);
+ T = Ty->getUnderlyingExpr()->getType();
+ break;
+ }
+ case Type::TypeOf:
+ T = cast<TypeOfType>(T)->getUnderlyingType();
+ break;
+ case Type::Decltype:
+ T = cast<DecltypeType>(T)->getUnderlyingType();
+ break;
+ case Type::Elaborated:
+ T = cast<ElaboratedType>(T)->getNamedType();
+ break;
+ case Type::SubstTemplateTypeParm:
+ T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
+ break;
+ }
+
+ assert(T != LastT && "Type unwrapping failed to unwrap!");
+ if (T == LastT)
+ return T;
+ } while (true);
+
+ return T;
+}
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
+ llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ // Check for existing entry.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(Ty.getAsOpaquePtr());
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateTypeNode(Ty, Unit);
+
+ // And update the type cache.
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+ return Res;
+}
+
+/// CreateTypeNode - Create a new debug type node.
+llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
+ llvm::DIFile Unit) {
+ // Handle qualifiers, which recursively handles what they refer to.
+ if (Ty.hasLocalQualifiers())
+ return CreateQualifiedType(Ty, Unit);
+
+ const char *Diag = 0;
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Dependent types cannot show up in debug information");
+
+ // FIXME: Handle these.
+ case Type::ExtVector:
+ return llvm::DIType();
+
+ case Type::Vector:
+ return CreateType(cast<VectorType>(Ty), Unit);
+ case Type::ObjCObjectPointer:
+ return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
+ case Type::ObjCObject:
+ return CreateType(cast<ObjCObjectType>(Ty), Unit);
+ case Type::ObjCInterface:
+ return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+ case Type::Builtin: return CreateType(cast<BuiltinType>(Ty), Unit);
+ case Type::Complex: return CreateType(cast<ComplexType>(Ty), Unit);
+ case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit);
+ case Type::BlockPointer:
+ return CreateType(cast<BlockPointerType>(Ty), Unit);
+ case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Record:
+ case Type::Enum:
+ return CreateType(cast<TagType>(Ty), Unit);
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return CreateType(cast<FunctionType>(Ty), Unit);
+ case Type::ConstantArray:
+ case Type::VariableArray:
+ case Type::IncompleteArray:
+ return CreateType(cast<ArrayType>(Ty), Unit);
+
+ case Type::LValueReference:
+ return CreateType(cast<LValueReferenceType>(Ty), Unit);
+
+ case Type::MemberPointer:
+ return CreateType(cast<MemberPointerType>(Ty), Unit);
+
+ case Type::TemplateSpecialization:
+ case Type::Elaborated:
+ case Type::SubstTemplateTypeParm:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ llvm_unreachable("type should have been unwrapped!");
+ return llvm::DIType();
+
+ case Type::RValueReference:
+ // FIXME: Implement!
+ Diag = "rvalue references";
+ break;
+ }
+
+ assert(Diag && "Fall through without a diagnostic?");
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(Diagnostic::Error,
+ "debug information for %0 is not yet supported");
+ CGM.getDiags().Report(FullSourceLoc(), DiagID)
+ << Diag;
+ return llvm::DIType();
+}
+
+/// CreateMemberType - Create new member and increase Offset by FType's size.
+llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
+ llvm::StringRef Name,
+ uint64_t *Offset) {
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
+ unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
+ llvm::DIType Ty = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
+ Unit, Name, Unit, 0,
+ FieldSize, FieldAlign,
+ *Offset, 0, FieldTy);
+ *Offset += FieldSize;
+ return Ty;
+}
+
+/// EmitFunctionStart - Constructs the debug code for entering a function -
+/// "llvm.dbg.func.start.".
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
+ llvm::Function *Fn,
+ CGBuilderTy &Builder) {
+
+ llvm::StringRef Name;
+ MangleBuffer LinkageName;
+
+ const Decl *D = GD.getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // If there is a DISubprogram for this function available then use it.
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ FI = SPCache.find(FD);
+ if (FI != SPCache.end()) {
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
+ if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
+ llvm::MDNode *SPN = SP;
+ RegionStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
+ return;
+ }
+ }
+ Name = getFunctionName(FD);
+ // Use mangled name as linkage name for c/c++ functions.
+ CGM.getMangledName(LinkageName, GD);
+ } else {
+ // Use llvm function name as linkage name.
+ Name = Fn->getName();
+ LinkageName.setString(Name);
+ }
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
+
+ // It is expected that CurLoc is set before using EmitFunctionStart.
+ // Usually, CurLoc points to the left bracket location of the compound
+ // statement representing the function body.
+ llvm::DIFile Unit = getOrCreateFile(CurLoc);
+ unsigned LineNo = getLineNumber(CurLoc);
+
+ llvm::DISubprogram SP =
+ DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
+ getOrCreateType(FnType, Unit),
+ Fn->hasInternalLinkage(), true/*definition*/);
+
+ // Push function on region stack.
+ llvm::MDNode *SPN = SP;
+ RegionStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
+}
+
+void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
+ if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
+
+ // Don't bother if things are the same as last time.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ if (CurLoc == PrevLoc
+ || (SM.getInstantiationLineNumber(CurLoc) ==
+ SM.getInstantiationLineNumber(PrevLoc)
+ && SM.isFromSameFile(CurLoc, PrevLoc)))
+ // New Builder may not be in sync with CGDebugInfo.
+ if (!Builder.getCurrentDebugLocation().isUnknown())
+ return;
+
+ // Update last state.
+ PrevLoc = CurLoc;
+
+ llvm::MDNode *Scope = RegionStack.back();
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(CurLoc),
+ getColumnNumber(CurLoc),
+ Scope));
+}
+
+/// EmitRegionStart - Constructs the debug code for entering a declarative
+/// region - "llvm.dbg.region.start.".
+void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
+ llvm::DIDescriptor D =
+ DebugFactory.CreateLexicalBlock(RegionStack.empty() ?
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(RegionStack.back()),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
+ llvm::MDNode *DN = D;
+ RegionStack.push_back(DN);
+}
+
+/// EmitRegionEnd - Constructs the debug code for exiting a declarative
+/// region - "llvm.dbg.region.end."
+void CGDebugInfo::EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ // Provide a region stop point.
+ EmitStopPoint(Fn, Builder);
+
+ RegionStack.pop_back();
+}
+
+// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+// See BuildByRefType.
+llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
+ uint64_t *XOffset) {
+
+ llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ QualType Type = VD->getType();
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
+
+ bool HasCopyAndDispose = CGM.BlockRequiresCopying(Type);
+ if (HasCopyAndDispose) {
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
+ &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__destroy_helper",
+ &FieldOffset));
+ }
+
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ if (Align > CharUnits::fromQuantity(
+ CGM.getContext().Target.getPointerAlign(0) / 8)) {
+ unsigned AlignedOffsetInBytes
+ = llvm::RoundUpToAlignment(FieldOffset/8, Align.getQuantity());
+ unsigned NumPaddingBytes
+ = AlignedOffsetInBytes - FieldOffset/8;
+
+ if (NumPaddingBytes > 0) {
+ llvm::APInt pad(32, NumPaddingBytes);
+ FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
+ pad, ArrayType::Normal, 0);
+ EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
+ }
+ }
+
+ FType = Type;
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ FieldAlign = Align.getQuantity()*8;
+
+ *XOffset = FieldOffset;
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ VD->getName(), Unit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ unsigned Flags = llvm::DIType::FlagBlockByrefStruct;
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ Unit, "", Unit,
+ 0, FieldOffset, 0, 0, Flags,
+ llvm::DIType(), Elements);
+}
+
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
+ llvm::Value *Storage, CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType Ty;
+ uint64_t XOffset = 0;
+ if (VD->hasAttr<BlocksAttr>())
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+ // If there is no debug info for the type, do not emit debug info
+ // for this variable.
+ if (!Ty)
+ return;
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(),
+ Unit, Line, Ty);
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ llvm::MDNode *Scope = RegionStack.back();
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+}
+
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
+ llvm::Value *Storage, CGBuilderTy &Builder,
+ CodeGenFunction *CGF) {
+ const ValueDecl *VD = BDRE->getDecl();
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ if (Builder.GetInsertBlock() == 0)
+ return;
+
+ uint64_t XOffset = 0;
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType Ty;
+ if (VD->hasAttr<BlocksAttr>())
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+
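+ // Build a complex address: dereference the block pointer and add the
+ // variable's offset within the block. For a __block variable, additionally
+ // follow the __forwarding pointer and then add the offset of the variable's
+ // field within the byref struct.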
+ CharUnits offset = CGF->BlockDecls[VD];
+ llvm::SmallVector<llvm::Value *, 9> addr;
+ const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ if (BDRE->isByRef()) {
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ // offset of __forwarding field
+ offset = CharUnits::fromQuantity(CGF->LLVMPointerWidth/8);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+ // offset of x field
+ offset = CharUnits::fromQuantity(XOffset/8);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ }
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DebugFactory.CreateComplexVariable(Tag,
+ llvm::DIDescriptor(RegionStack.back()),
+ VD->getName(), Unit, Line, Ty,
+ addr);
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+ llvm::MDNode *Scope = RegionStack.back();
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+}
+
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder);
+}
+
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const BlockDeclRefExpr *BDRE, llvm::Value *Storage, CGBuilderTy &Builder,
+ CodeGenFunction *CGF) {
+ EmitDeclare(BDRE, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder, CGF);
+}
+
+/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+/// variable declaration.
+void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, Builder);
+}
+
+/// EmitGlobalVariable - Emit information about a global variable.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ const VarDecl *D) {
+
+ // Create global variable debug descriptor.
+ llvm::DIFile Unit = getOrCreateFile(D->getLocation());
+ unsigned LineNo = getLineNumber(D->getLocation());
+
+ QualType T = D->getType();
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+ llvm::StringRef DeclName = D->getName();
+ llvm::StringRef LinkageName;
+ if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext()))
+ LinkageName = Var->getName();
+ llvm::DIDescriptor DContext =
+ getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()), Unit);
+ DebugFactory.CreateGlobalVariable(DContext, DeclName, DeclName, LinkageName,
+ Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(),
+ true/*definition*/, Var);
+}
+
+/// EmitGlobalVariable - Emit information about an objective-c interface.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ ObjCInterfaceDecl *ID) {
+ // Create global variable debug descriptor.
+ llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
+ unsigned LineNo = getLineNumber(ID->getLocation());
+
+ llvm::StringRef Name = ID->getName();
+
+ QualType T = CGM.getContext().getObjCInterfaceType(ID);
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(),
+ true/*definition*/, Var);
+}
+
+/// getOrCreateNameSpace - Return the namespace descriptor for the given
+/// namespace decl.
+llvm::DINameSpace
+CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl,
+ llvm::DIDescriptor Unit) {
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
+ NameSpaceCache.find(NSDecl);
+ if (I != NameSpaceCache.end())
+ return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
+
+ unsigned LineNo = getLineNumber(NSDecl->getLocation());
+
+ llvm::DIDescriptor Context =
+ getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()), Unit);
+ llvm::DINameSpace NS =
+ DebugFactory.CreateNameSpace(Context, NSDecl->getName(),
+ llvm::DIFile(Unit), LineNo);
+ NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
+ return NS;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
new file mode 100644
index 0000000..620a5f2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
@@ -0,0 +1,224 @@
+//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the source level debug info generator for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
+#define CLANG_CODEGEN_CGDEBUGINFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
+
+#include "CGBuilder.h"
+
+namespace llvm {
+ class MDNode;
+}
+
+namespace clang {
+ class VarDecl;
+ class ObjCInterfaceDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CodeGenFunction;
+ class GlobalDecl;
+
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it to llvm globals or passing it directly
+/// to the backend.
+class CGDebugInfo {
+ CodeGenModule &CGM;
+ llvm::DIFactory DebugFactory;
+ llvm::DICompileUnit TheCU;
+ SourceLocation CurLoc, PrevLoc;
+ llvm::DIType VTablePtrType;
+ /// FwdDeclCount - This counter is used to ensure unique names for forward
+ /// record decls.
+ unsigned FwdDeclCount;
+
+ /// TypeCache - Cache of previously constructed Types.
+ llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
+
+ bool BlockLiteralGenericSet;
+ llvm::DIType BlockLiteralGeneric;
+
+ std::vector<llvm::TrackingVH<llvm::MDNode> > RegionStack;
+ llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
+
+ /// DebugInfoNames - This is storage for names that are constructed on
+ /// demand, e.g. C++ destructors and C++ operators.
+ llvm::BumpPtrAllocator DebugInfoNames;
+
+ llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
+
+ /// Helper functions for getOrCreateType.
+ llvm::DIType CreateType(const BuiltinType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ComplexType *Ty, llvm::DIFile F);
+ llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile F);
+ llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const TagType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const RecordType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const EnumType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
+ llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile F);
+ llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
+ llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N,
+ llvm::DIDescriptor Unit);
+
+ llvm::DIType CreatePointerLikeType(unsigned Tag,
+ const Type *Ty, QualType PointeeTy,
+ llvm::DIFile F);
+
+ llvm::DISubprogram CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile F,
+ llvm::DICompositeType &RecordTy);
+
+ void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &E,
+ llvm::DICompositeType &T);
+ void CollectCXXBases(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DICompositeType &RecordTy);
+
+ void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &E);
+
+ void CollectVTableInfo(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys);
+
+public:
+ CGDebugInfo(CodeGenModule &CGM);
+ ~CGDebugInfo();
+
+ /// setLocation - Update the current source location. If \arg loc is
+ /// invalid it is ignored.
+ void setLocation(SourceLocation Loc);
+
+ /// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
+ /// source line.
+ void EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate
+ /// start of a new function.
+ void EmitFunctionStart(GlobalDecl GD, QualType FnType,
+ llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
+ /// of a new block.
+ void EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
+ /// block.
+ void EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
+ /// variable declaration.
+ void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an
+ /// imported variable declaration in a block.
+ void EmitDeclareOfBlockDeclRefVariable(const BlockDeclRefExpr *BDRE,
+ llvm::Value *AI,
+ CGBuilderTy &Builder,
+ CodeGenFunction *CGF);
+
+ /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+ /// variable declaration.
+ void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitGlobalVariable - Emit information about a global variable.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+
+ /// EmitGlobalVariable - Emit information about an objective-c interface.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
+
+private:
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder, CodeGenFunction *CGF);
+
+ // EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+ // See BuildByRefType.
+ llvm::DIType EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
+ uint64_t *OffSet);
+
+ /// getContextDescriptor - Get context info for the decl.
+ llvm::DIDescriptor getContextDescriptor(const Decl *Decl,
+ llvm::DIDescriptor &CU);
+
+ /// CreateCompileUnit - Create new compile unit.
+ void CreateCompileUnit();
+
+ /// getOrCreateFile - Get the file debug info descriptor for the input
+ /// location.
+ llvm::DIFile getOrCreateFile(SourceLocation Loc);
+
+ /// getOrCreateType - Get the type from the cache or create a new type if
+ /// necessary.
+ llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
+
+ /// CreateTypeNode - Create type metadata for a source language type.
+ llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile F);
+
+ /// CreateMemberType - Create new member and increase Offset by FType's size.
+ llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
+ llvm::StringRef Name, uint64_t *Offset);
+
+ /// getFunctionName - Get function name for the given FunctionDecl. If the
+ /// name is constructed on demand (e.g. C++ destructor) then the name
+ /// is stored on the side.
+ llvm::StringRef getFunctionName(const FunctionDecl *FD);
+
+ /// getVTableName - Get vtable name for the given Class.
+ llvm::StringRef getVTableName(const CXXRecordDecl *Decl);
+
+ /// getLineNumber - Get line number for the location. If location is invalid
+ /// then use current location.
+ unsigned getLineNumber(SourceLocation Loc);
+
+ /// getColumnNumber - Get column number for the location. If location is
+ /// invalid then use current location.
+ unsigned getColumnNumber(SourceLocation Loc);
+};
+} // namespace CodeGen
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 0000000..07edca0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,830 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+void CodeGenFunction::EmitDecl(const Decl &D) {
+ switch (D.getKind()) {
+ case Decl::TranslationUnit:
+ case Decl::Namespace:
+ case Decl::UnresolvedUsingTypename:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::TemplateTypeParm:
+ case Decl::UnresolvedUsingValue:
+ case Decl::NonTypeTemplateParm:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::Field:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
+ case Decl::ParmVar:
+ case Decl::ImplicitParam:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::TemplateTemplateParm:
+ case Decl::ObjCMethod:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCProperty:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::LinkageSpec:
+ case Decl::ObjCPropertyImpl:
+ case Decl::ObjCClass:
+ case Decl::ObjCForwardProtocol:
+ case Decl::FileScopeAsm:
+ case Decl::Friend:
+ case Decl::FriendTemplate:
+ case Decl::Block:
+
+ assert(0 && "Declaration not should not be in declstmts!");
+ case Decl::Function: // void X();
+ case Decl::Record: // struct/union/class X;
+ case Decl::Enum: // enum X;
+ case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::CXXRecord: // struct/union/class X; [C++]
+ case Decl::Using: // using X; [C++]
+ case Decl::UsingShadow:
+ case Decl::UsingDirective: // using namespace X; [C++]
+ case Decl::NamespaceAlias:
+ case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
+ // None of these decls require codegen support.
+ return;
+
+ case Decl::Var: {
+ const VarDecl &VD = cast<VarDecl>(D);
+ assert(VD.isBlockVarDecl() &&
+ "Should not see file-scope variables inside a function!");
+ return EmitBlockVarDecl(VD);
+ }
+
+ case Decl::Typedef: { // typedef int X;
+ const TypedefDecl &TD = cast<TypedefDecl>(D);
+ QualType Ty = TD.getUnderlyingType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ }
+ }
+}
+
+/// EmitBlockVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.
+void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
+ if (D.hasAttr<AsmLabelAttr>())
+ CGM.ErrorUnsupported(&D, "__asm__");
+
+ switch (D.getStorageClass()) {
+ case VarDecl::None:
+ case VarDecl::Auto:
+ case VarDecl::Register:
+ return EmitLocalBlockVarDecl(D);
+ case VarDecl::Static: {
+ llvm::GlobalValue::LinkageTypes Linkage =
+ llvm::GlobalValue::InternalLinkage;
+
+ // If the function definition has some sort of weak linkage, its
+ // static variables should also be weak so that they get properly
+ // uniqued. We can't do this in C, though, because there's no
+ // standard way to agree on which variables are the same (i.e.
+ // there's no mangling).
+ if (getContext().getLangOptions().CPlusPlus)
+ if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
+ Linkage = CurFn->getLinkage();
+
+ return EmitStaticBlockVarDecl(D, Linkage);
+ }
+ case VarDecl::Extern:
+ case VarDecl::PrivateExtern:
+ // Don't emit it now, allow it to be emitted lazily on its first use.
+ return;
+ }
+
+ assert(0 && "Unknown storage class");
+}
+
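+/// GetStaticDeclName - Compute the name for a function-local static variable:
+/// the mangled name in C++, otherwise the enclosing context's name joined to
+/// the variable's name with Separator, e.g. a static "x" in C function "foo"
+/// becomes "foo.x" when the separator is ".".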
+static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
+ const char *Separator) {
+ CodeGenModule &CGM = CGF.CGM;
+ if (CGF.getContext().getLangOptions().CPlusPlus) {
+ MangleBuffer Name;
+ CGM.getMangledName(Name, &D);
+ return Name.getString().str();
+ }
+
+ std::string ContextName;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
+ MangleBuffer Name;
+ CGM.getMangledName(Name, FD);
+ ContextName = Name.getString().str();
+ } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
+ ContextName = CGF.CurFn->getName();
+ else
+ // FIXME: What about in a block??
+ assert(0 && "Unknown context for block var decl");
+
+ return ContextName + Separator + D.getNameAsString();
+}
+
+llvm::GlobalVariable *
+CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ QualType Ty = D.getType();
+ assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+ std::string Name = GetStaticDeclName(*this, D, Separator);
+
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), LTy,
+ Ty.isConstant(getContext()), Linkage,
+ CGM.EmitNullConstant(D.getType()), Name, 0,
+ D.isThreadSpecified(), Ty.getAddressSpace());
+ GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ return GV;
+}
+
+/// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+/// global variable that has already been created for it. If the initializer
+/// has a different type than GV does, this may free GV and return a different
+/// one. Otherwise it just returns GV.
+llvm::GlobalVariable *
+CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
+
+ // If constant emission failed, then this should be a C++ static
+ // initializer.
+ if (!Init) {
+ if (!getContext().getLangOptions().CPlusPlus)
+ CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else {
+ // Since we have a static initializer, this global variable can't
+ // be constant.
+ GV->setConstant(false);
+
+ EmitStaticCXXBlockVarDeclInit(D, GV);
+ }
+ return GV;
+ }
+
+ // The initializer may differ in type from the global. Rewrite
+ // the global to match the initializer. (We have to do this
+ // because some types, like unions, can't be completely represented
+ // in the LLVM type system.)
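+ // For example, a union's initializer is emitted as a constant of the
+ // active member's type, which may not match the converted union type.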
+ if (GV->getType() != Init->getType()) {
+ llvm::GlobalVariable *OldGV = GV;
+
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ OldGV->isConstant(),
+ OldGV->getLinkage(), Init, "",
+ 0, D.isThreadSpecified(),
+ D.getType().getAddressSpace());
+
+ // Steal the name of the old global
+ GV->takeName(OldGV);
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ OldGV->eraseFromParent();
+ }
+
+ GV->setInitializer(Init);
+ return GV;
+}
+
+void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+
+ llvm::GlobalVariable *GV = CreateStaticBlockVarDecl(D, ".", Linkage);
+
+ // Store into LocalDeclMap before generating initializer to handle
+ // circular references.
+ DMEntry = GV;
+
+ // We can't have a VLA here, but we can have a pointer to a VLA,
+ // even though that doesn't really make any sense.
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (D.getType()->isVariablyModifiedType())
+ EmitVLASize(D.getType());
+
+ // If this value has an initializer, emit it.
+ if (D.getInit())
+ GV = AddInitializerToGlobalBlockVarDecl(D, GV);
+
+ GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+
+ // FIXME: Merge attribute handling.
+ if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ llvm::Constant *Ann =
+ CGM.EmitAnnotateAttr(GV, AA,
+ SM.getInstantiationLineNumber(D.getLocation()));
+ CGM.AddAnnotation(Ann);
+ }
+
+ if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+
+ if (D.hasAttr<UsedAttr>())
+ CGM.AddUsedGlobal(GV);
+
+ if (getContext().getLangOptions().CPlusPlus)
+ CGM.setStaticLocalDeclAddress(&D, GV);
+
+ // We may have to cast the constant because of the initializer
+ // mismatch above.
+ //
+ // FIXME: It is really dangerous to store this in the map; if anyone
+ // RAUW's the GV, uses of this constant will be invalid.
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
+ const llvm::Type *LPtrTy =
+ llvm::PointerType::get(LTy, D.getType().getAddressSpace());
+ DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
+
+ // Emit global variable debug descriptor for static vars.
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(D.getLocation());
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
+ }
+}
+
+unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
+ assert(ByRefValueInfo.count(VD) && "Did not find value!");
+
+ return ByRefValueInfo.find(VD)->second.second;
+}
+
+/// BuildByRefType - This routine changes a __block variable declared as T x
+/// into:
+///
+/// struct {
+/// void *__isa;
+/// void *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__copy_helper; // only if needed
+/// void *__destroy_helper; // only if needed
+/// char padding[X]; // only if needed
+/// T x;
+/// } x
+///
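+/// For example, on a 32-bit target a "__block int x;" that needs no
+/// copy/dispose helpers and no padding becomes a 20-byte struct: two
+/// pointers, two int32_t fields, and then the int itself.
+///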
+const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
+ std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+ if (Info.first)
+ return Info.first;
+
+ QualType Ty = D->getType();
+
+ std::vector<const llvm::Type *> Types;
+
+ const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
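+ // The byref struct refers to itself through __forwarding, so build it
+ // around an opaque placeholder that is refined to the real struct type
+ // once all of the fields are known.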
+ llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);
+
+ // void *__isa;
+ Types.push_back(Int8PtrTy);
+
+ // void *__forwarding;
+ Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
+
+ // int32_t __flags;
+ Types.push_back(llvm::Type::getInt32Ty(VMContext));
+
+ // int32_t __size;
+ Types.push_back(llvm::Type::getInt32Ty(VMContext));
+
+ bool HasCopyAndDispose = BlockRequiresCopying(Ty);
+ if (HasCopyAndDispose) {
+ /// void *__copy_helper;
+ Types.push_back(Int8PtrTy);
+
+ /// void *__destroy_helper;
+ Types.push_back(Int8PtrTy);
+ }
+
+ bool Packed = false;
+ CharUnits Align = getContext().getDeclAlign(D);
+ if (Align > CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)) {
+ // We have to insert padding.
+
+ // The struct above has 2 32-bit integers.
+ unsigned CurrentOffsetInBytes = 4 * 2;
+
+ // And either 2 or 4 pointers.
+ CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
+ CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+
+ // Align the offset.
+ unsigned AlignedOffsetInBytes =
+ llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
+
+ unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
+ if (NumPaddingBytes > 0) {
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ // FIXME: We need a sema error for alignment larger than the minimum of
+ // the maximal stack alignment and the alignment of malloc on the system.
+ if (NumPaddingBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
+
+ Types.push_back(Ty);
+
+ // We want a packed struct.
+ Packed = true;
+ }
+ }
+
+ // T x;
+ Types.push_back(ConvertType(Ty));
+
+ const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);
+
+ cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
+ CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
+ ByRefTypeHolder.get());
+
+ Info.first = ByRefTypeHolder.get();
+
+ Info.second = Types.size() - 1;
+
+ return Info.first;
+}
+
+/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
+/// variable declaration with auto, register, or no storage class specifier.
+/// These turn into simple stack objects, or GlobalValues depending on the target.
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
+ QualType Ty = D.getType();
+ bool isByRef = D.hasAttr<BlocksAttr>();
+ bool needsDispose = false;
+ CharUnits Align = CharUnits::Zero();
+ bool IsSimpleConstantInitializer = false;
+
+ bool NRVO = false;
+ llvm::Value *NRVOFlag = 0;
+ llvm::Value *DeclPtr;
+ if (Ty->isConstantSizeType()) {
+ if (!Target.useGlobalsForAutomaticVariables()) {
+ NRVO = getContext().getLangOptions().ElideConstructors &&
+ D.isNRVOVariable();
+ // If this value is an array or struct, is POD, and if the initializer is
+ // a statically determinable constant, try to optimize it (unless the NRVO
+ // is already optimizing this).
+ if (D.getInit() && !isByRef &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ Ty->isPODType() &&
+ D.getInit()->isConstantInitializer(getContext()) && !NRVO) {
+ // If this variable is marked 'const', emit the value as a global.
+ if (CGM.getCodeGenOpts().MergeAllConstants &&
+ Ty.isConstant(getContext())) {
+ EmitStaticBlockVarDecl(D, llvm::GlobalValue::InternalLinkage);
+ return;
+ }
+
+ IsSimpleConstantInitializer = true;
+ }
+
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ // unless it's an NRVO variable.
+ const llvm::Type *LTy = ConvertTypeForMem(Ty);
+
+ if (NRVO) {
+ // The named return value optimization: allocate this variable in the
+ // return slot, so that we can elide the copy when returning this
+ // variable (C++0x [class.copy]p34).
+ DeclPtr = ReturnValue;
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+ // Create a flag that is used to indicate when the NRVO was applied
+ // to this variable. Set it to zero to indicate that NRVO was not
+ // applied.
+ const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
+ llvm::Value *Zero = llvm::ConstantInt::get(BoolTy, 0);
+ NRVOFlag = CreateTempAlloca(BoolTy, "nrvo");
+ Builder.CreateStore(Zero, NRVOFlag);
+
+ // Record the NRVO flag for this variable.
+ NRVOFlags[&D] = NRVOFlag;
+ }
+ }
+ } else {
+ if (isByRef)
+ LTy = BuildByRefType(&D);
+
+ llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+ Alloc->setName(D.getNameAsString());
+
+ Align = getContext().getDeclAlign(&D);
+ if (isByRef)
+ Align = std::max(Align,
+ CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
+ Alloc->setAlignment(Align.getQuantity());
+ DeclPtr = Alloc;
+ }
+ } else {
+ // Targets that don't support recursion emit locals as globals.
+ const char *Class =
+ D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
+ DeclPtr = CreateStaticBlockVarDecl(D, Class,
+ llvm::GlobalValue::InternalLinkage);
+ }
+
+ // FIXME: Can this happen?
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ } else {
+ EnsureInsertPoint();
+
+ if (!DidCallStackSave) {
+ // Save the stack.
+ const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Value *V = Builder.CreateCall(F);
+
+ Builder.CreateStore(V, Stack);
+
+ DidCallStackSave = true;
+
+ {
+ // Push a cleanup block and restore the stack there.
+ DelayedCleanupBlock scope(*this);
+
+ V = Builder.CreateLoad(Stack, "tmp");
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ Builder.CreateCall(F, V);
+ }
+ }
+
+ // Get the element type.
+ const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
+ const llvm::Type *LElemPtrTy =
+ llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
+
+ llvm::Value *VLASize = EmitVLASize(Ty);
+
+ // Downcast the VLA size expression.
+ VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
+ false, "tmp");
+
+ // Allocate memory for the array.
+ llvm::AllocaInst *VLA =
+ Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
+ VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+
+ DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for local var declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ assert(HaveInsertPoint() && "Unexpected unreachable point!");
+
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
+
+ // If this local has an initializer, emit it now.
+ const Expr *Init = D.getInit();
+
+ // If we are at an unreachable point, we don't need to emit the initializer
+ // unless it contains a label.
+ if (!HaveInsertPoint()) {
+ if (!ContainsLabel(Init))
+ Init = 0;
+ else
+ EnsureInsertPoint();
+ }
+
+ if (isByRef) {
+ const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);
+
+ EnsureInsertPoint();
+ llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
+ llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
+ llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
+ llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
+ llvm::Value *V;
+ int flag = 0;
+ int flags = 0;
+
+ needsDispose = true;
+
+ if (Ty->isBlockPointerType()) {
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ } else if (BlockRequiresCopying(Ty)) {
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ }
+
+ // FIXME: Someone double check this.
+ if (Ty.isObjCGCWeak())
+ flag |= BLOCK_FIELD_IS_WEAK;
+
+ int isa = 0;
+ if (flag & BLOCK_FIELD_IS_WEAK)
+ isa = 1;
+ V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
+ V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
+ Builder.CreateStore(V, isa_field);
+
+ Builder.CreateStore(DeclPtr, forwarding_field);
+
+ V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
+ Builder.CreateStore(V, flags_field);
+
+ const llvm::Type *V1;
+ V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
+ V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ CGM.GetTargetTypeStoreSize(V1).getQuantity());
+ Builder.CreateStore(V, size_field);
+
+ if (flags & BLOCK_HAS_COPY_DISPOSE) {
+ BlockHasCopyDispose = true;
+ llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
+ Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag,
+ Align.getQuantity()),
+ copy_helper);
+
+ llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
+ Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
+ Align.getQuantity()),
+ destroy_helper);
+ }
+ }
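+
+ // Illustrative note (not part of the original commit): at this point the
+ // byref header has been initialized roughly as if by
+ //   x.isa = (void *)(weak ? 1 : 0);  x.forwarding = &x;
+ //   x.flags = <copy/dispose bits>;   x.size = sizeof(struct __block_byref_x);
+ // with the copy and destroy helpers stored at fields 4 and 5 when needed.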
+
+ if (Init) {
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+
+ bool isVolatile =
+ getContext().getCanonicalType(D.getType()).isVolatileQualified();
+
+ // If the initializer was a simple constant initializer, we can optimize it
+ // in various ways.
+ if (IsSimpleConstantInitializer) {
+ llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
+ assert(Init != 0 && "Wasn't a simple constant init?");
+
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ Align.getQuantity());
+ const llvm::Type *IntPtr =
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth);
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(IntPtr,
+ getContext().getTypeSizeInChars(Ty).getQuantity());
+
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ if (Loc->getType() != BP)
+ Loc = Builder.CreateBitCast(Loc, BP, "tmp");
+
+ llvm::Value *NotVolatile =
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
+
+ // If the initializer is all zeros, codegen with memset.
+ if (isa<llvm::ConstantAggregateZero>(Init)) {
+ llvm::Value *Zero =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
+ Builder.CreateCall5(CGM.getMemSetFn(Loc->getType(), SizeVal->getType()),
+ Loc, Zero, SizeVal, AlignVal, NotVolatile);
+ } else {
+ // Otherwise, create a temporary global with the initializer then
+ // memcpy from the global to the alloca.
+ std::string Name = GetStaticDeclName(*this, D, ".");
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name, 0, false, 0);
+ GV->setAlignment(Align.getQuantity());
+
+ llvm::Value *SrcPtr = GV;
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+
+ Builder.CreateCall5(CGM.getMemCpyFn(Loc->getType(), SrcPtr->getType(),
+ SizeVal->getType()),
+ Loc, SrcPtr, SizeVal, AlignVal, NotVolatile);
+ }
+ } else if (Ty->isReferenceType()) {
+ RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
+ EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
+ } else if (!hasAggregateLLVMType(Init->getType())) {
+ llvm::Value *V = EmitScalarExpr(Init);
+ EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
+ } else if (Init->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, Loc, isVolatile);
+ } else {
+ EmitAggExpr(Init, Loc, isVolatile);
+ }
+ }
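+
+ // Illustrative note (not part of the original commit): the two constant
+ // paths above mean that, for example, 'int a[64] = {};' lowers to a memset
+ // of the alloca, while 'int a[4] = {1, 2, 3, 4};' lowers to a memcpy from
+ // a private constant global holding the initializer.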
+
+ // Handle CXX destruction of variables.
+ QualType DtorTy(Ty);
+ while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
+ DtorTy = getContext().getBaseElementType(Array);
+ if (const RecordType *RT = DtorTy->getAs<RecordType>())
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->hasTrivialDestructor()) {
+ // Note: We suppress the destructor call when the corresponding NRVO
+ // flag has been set.
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
+ assert(D && "EmitLocalBlockVarDecl - destructor is nul");
+
+ if (const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(Ty)) {
+ {
+ DelayedCleanupBlock Scope(*this);
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(Loc, BasePtr);
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+
+ // Make sure to jump to the exit block.
+ EmitBranch(Scope.getCleanupExitBlock());
+ }
+ if (Exceptions) {
+ EHCleanupBlock Cleanup(*this);
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(Loc, BasePtr);
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+ }
+ } else {
+ {
+ // Normal destruction.
+ DelayedCleanupBlock Scope(*this);
+
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
+ Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
+ Scope.getCleanupExitBlock(),
+ NoNRVO);
+ EmitBlock(NoNRVO);
+ }
+
+ // We don't call the destructor along the normal edge if we're
+ // applying the NRVO.
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
+ Loc);
+
+ // Make sure to jump to the exit block.
+ EmitBranch(Scope.getCleanupExitBlock());
+ }
+
+ if (Exceptions) {
+ EHCleanupBlock Cleanup(*this);
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
+ Loc);
+ }
+ }
+ }
+ }
+
+ // Handle the cleanup attribute.
+ if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+ const FunctionDecl *FD = CA->getFunctionDecl();
+
+ llvm::Constant* F = CGM.GetAddrOfFunction(FD);
+ assert(F && "Could not find function!");
+
+ const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
+
+ // In some cases, the type of the function argument will be different from
+ // the type of the pointer. An example of this is
+ // void f(void* arg);
+ // __attribute__((cleanup(f))) void *g;
+ //
+ // To fix this we insert a bitcast here.
+ QualType ArgTy = Info.arg_begin()->type;
+ {
+ DelayedCleanupBlock scope(*this);
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
+ ConvertType(ArgTy))),
+ getContext().getPointerType(D.getType())));
+ EmitCall(Info, F, ReturnValueSlot(), Args);
+ }
+ if (Exceptions) {
+ EHCleanupBlock Cleanup(*this);
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
+ ConvertType(ArgTy))),
+ getContext().getPointerType(D.getType())));
+ EmitCall(Info, F, ReturnValueSlot(), Args);
+ }
+ }
+
+ if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
+ {
+ DelayedCleanupBlock scope(*this);
+ llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ BuildBlockRelease(V);
+ }
+ // FIXME: Turn this on and audit the codegen
+ if (0 && Exceptions) {
+ EHCleanupBlock Cleanup(*this);
+ llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ BuildBlockRelease(V);
+ }
+ }
+}
+
+/// Emit an alloca (or GlobalValue, depending on the target)
+/// for the specified parameter and set up LocalDeclMap.
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
+ // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
+ assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
+ "Invalid argument to EmitParmDecl");
+ QualType Ty = D.getType();
+ CanQualType CTy = getContext().getCanonicalType(Ty);
+
+ llvm::Value *DeclPtr;
+ // If this is an aggregate or variable sized value, reuse the input pointer.
+ if (!Ty->isConstantSizeType() ||
+ CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ DeclPtr = Arg;
+ } else {
+ // Otherwise, create a temporary to hold the value.
+ DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
+
+ // Store the initial value into the alloca.
+ EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Ty);
+ }
+ Arg->setName(D.getName());
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for param declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
+ }
+}
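+
+// Illustrative note (not part of the original commit): for a scalar
+// parameter such as 'void f(int i)', the incoming value is spilled to an
+// 'i.addr' alloca so the body can take its address; aggregate and
+// variable-sized parameters reuse the incoming pointer directly.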
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
new file mode 100644
index 0000000..f94ddd9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -0,0 +1,424 @@
+//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+ assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
+ assert(!D.getType()->isReferenceType() &&
+ "Should not call EmitDeclInit on a reference!");
+
+ ASTContext &Context = CGF.getContext();
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+ bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();
+
+ if (!CGF.hasAggregateLLVMType(T)) {
+ llvm::Value *V = CGF.EmitScalarExpr(Init);
+ CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
+ } else {
+ CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
+ }
+}
+
+static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+ CodeGenModule &CGM = CGF.CGM;
+ ASTContext &Context = CGF.getContext();
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+ if (!CGF.hasAggregateLLVMType(T) || T->isAnyComplexType())
+ return;
+
+ // Avoid generating destructor(s) for initialized objects.
+ if (!isa<CXXConstructExpr>(Init))
+ return;
+
+ const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
+ if (Array)
+ T = Context.getBaseElementType(Array);
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialDestructor())
+ return;
+
+ CXXDestructorDecl *Dtor = RD->getDestructor(Context);
+
+ llvm::Constant *DtorFn;
+ if (Array) {
+ DtorFn =
+ CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor,
+ Array,
+ DeclPtr);
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
+ } else
+ DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
+
+ CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
+}
+
+void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+
+ if (!T->isReferenceType()) {
+ EmitDeclInit(*this, D, DeclPtr);
+ EmitDeclDestroy(*this, D, DeclPtr);
+ return;
+ }
+ if (Init->isLvalue(getContext()) == Expr::LV_Valid) {
+ RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
+ return;
+ }
+ ErrorUnsupported(Init,
+ "global variable that binds reference to a non-lvalue");
+}
+
+void
+CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
+ llvm::Constant *DeclPtr) {
+ // Generate a global destructor entry if not using __cxa_atexit.
+ if (!CGM.getCodeGenOpts().CXAAtExit) {
+ CGM.AddCXXDtorEntry(DtorFn, DeclPtr);
+ return;
+ }
+
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ std::vector<const llvm::Type *> Params;
+ Params.push_back(Int8PtrTy);
+
+ // Get the destructor function type
+ const llvm::Type *DtorFnTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
+
+ Params.clear();
+ Params.push_back(DtorFnTy);
+ Params.push_back(Int8PtrTy);
+ Params.push_back(Int8PtrTy);
+
+ // Get the __cxa_atexit function type
+ // extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
+ const llvm::FunctionType *AtExitFnTy =
+ llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
+
+ llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
+ "__cxa_atexit");
+
+ llvm::Constant *Handle = CGM.CreateRuntimeVariable(Int8PtrTy,
+ "__dso_handle");
+ llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
+ llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
+ Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
+}
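+
+// Illustrative note (not part of the original commit): the call emitted
+// above is equivalent to the C registration
+//   __cxa_atexit((void (*)(void *))DtorFn, (void *)DeclPtr, &__dso_handle);
+// so the runtime runs the destructor at exit, keyed to this DSO.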
+
+void
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
+ const llvm::FunctionType *FTy
+ = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
+
+ // Create a variable initialization function.
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ "__cxx_global_var_init", &TheModule);
+
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D);
+
+ CXXGlobalInits.push_back(Fn);
+}
+
+void
+CodeGenModule::EmitCXXGlobalInitFunc() {
+ if (CXXGlobalInits.empty())
+ return;
+
+ const llvm::FunctionType *FTy
+ = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
+
+ // Create our global initialization function.
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ "_GLOBAL__I_a", &TheModule);
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
+ AddGlobalCtor(Fn);
+}
+
+void CodeGenModule::AddCXXDtorEntry(llvm::Constant *DtorFn,
+ llvm::Constant *Object) {
+ CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object));
+}
+
+void CodeGenModule::EmitCXXGlobalDtorFunc() {
+ if (CXXGlobalDtors.empty())
+ return;
+
+ const llvm::FunctionType *FTy
+ = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
+
+ // Create our global destructor function.
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ "_GLOBAL__D_a", &TheModule);
+
+ CodeGenFunction(*this).GenerateCXXGlobalDtorFunc(Fn, CXXGlobalDtors);
+ AddGlobalDtor(Fn);
+}
+
+void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+ SourceLocation());
+
+ llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
+ EmitCXXGlobalVarDeclInit(*D, DeclPtr);
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ llvm::Constant **Decls,
+ unsigned NumDecls) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+ SourceLocation());
+
+ for (unsigned i = 0; i != NumDecls; ++i)
+ Builder.CreateCall(Decls[i]);
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::Constant*, llvm::Constant*> >
+ &DtorsAndObjects) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+ SourceLocation());
+
+ // Emit the dtors, in reverse order from construction.
+ for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
+ llvm::Constant *Callee = DtorsAndObjects[e - i - 1].first;
+ llvm::CallInst *CI = Builder.CreateCall(Callee,
+ DtorsAndObjects[e - i - 1].second);
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
+ CI->setCallingConv(F->getCallingConv());
+ }
+
+ FinishFunction();
+}
+
+static llvm::Constant *getGuardAcquireFn(CodeGenFunction &CGF) {
+ // int __cxa_guard_acquire(__int64_t *guard_object);
+
+ const llvm::Type *Int64PtrTy =
+ llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
+
+ std::vector<const llvm::Type*> Args(1, Int64PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.ConvertType(CGF.getContext().IntTy),
+ Args, /*isVarArg=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
+}
+
+static llvm::Constant *getGuardReleaseFn(CodeGenFunction &CGF) {
+ // void __cxa_guard_release(__int64_t *guard_object);
+
+ const llvm::Type *Int64PtrTy =
+ llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
+
+ std::vector<const llvm::Type*> Args(1, Int64PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, /*isVarArg=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
+}
+
+static llvm::Constant *getGuardAbortFn(CodeGenFunction &CGF) {
+ // void __cxa_guard_abort(__int64_t *guard_object);
+
+ const llvm::Type *Int64PtrTy =
+ llvm::Type::getInt64PtrTy(CGF.getLLVMContext());
+
+ std::vector<const llvm::Type*> Args(1, Int64PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, /*isVarArg=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
+}
+
+void
+CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ // Bail out early if this initializer isn't reachable.
+ if (!Builder.GetInsertBlock()) return;
+
+ bool ThreadsafeStatics = getContext().getLangOptions().ThreadsafeStatics;
+
+ llvm::SmallString<256> GuardVName;
+ CGM.getMangleContext().mangleGuardVariable(&D, GuardVName);
+
+ // Create the guard variable.
+ const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(VMContext);
+ llvm::GlobalValue *GuardVariable =
+ new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
+ false, GV->getLinkage(),
+ llvm::Constant::getNullValue(Int64Ty),
+ GuardVName.str());
+
+ // Load the first byte of the guard variable.
+ const llvm::Type *PtrTy
+ = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
+ llvm::Value *V =
+ Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
+
+ llvm::BasicBlock *InitCheckBlock = createBasicBlock("init.check");
+ llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
+
+ // Check if the first byte of the guard variable is zero.
+ Builder.CreateCondBr(Builder.CreateIsNull(V, "tobool"),
+ InitCheckBlock, EndBlock);
+
+ EmitBlock(InitCheckBlock);
+
+ // Variables used when coping with thread-safe statics and exceptions.
+ llvm::BasicBlock *SavedLandingPad = 0;
+ llvm::BasicBlock *LandingPad = 0;
+ if (ThreadsafeStatics) {
+ // Call __cxa_guard_acquire.
+ V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
+
+ llvm::BasicBlock *InitBlock = createBasicBlock("init");
+
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
+ InitBlock, EndBlock);
+
+ if (Exceptions) {
+ SavedLandingPad = getInvokeDest();
+ LandingPad = createBasicBlock("guard.lpad");
+ setInvokeDest(LandingPad);
+ }
+
+ EmitBlock(InitBlock);
+ }
+
+ if (D.getType()->isReferenceType()) {
+ QualType T = D.getType();
+ // We don't want to pass true for IsInitializer here, because a static
+ // reference to a temporary does not extend its lifetime.
+ RValue RV = EmitReferenceBindingToExpr(D.getInit(),
+ /*IsInitializer=*/false);
+ EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, T);
+
+ } else
+ EmitDeclInit(*this, D, GV);
+
+ if (ThreadsafeStatics) {
+ // Call __cxa_guard_release.
+ Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
+ } else {
+ llvm::Value *One =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1);
+ Builder.CreateStore(One, Builder.CreateBitCast(GuardVariable, PtrTy));
+ }
+
+ // Register the call to the destructor.
+ if (!D.getType()->isReferenceType())
+ EmitDeclDestroy(*this, D, GV);
+
+ if (ThreadsafeStatics && Exceptions) {
+ // If an exception is thrown during initialization, call __cxa_guard_abort
+ // along the exceptional edge.
+ EmitBranch(EndBlock);
+
+ // Construct the landing pad.
+ EmitBlock(LandingPad);
+
+ // Personality function and LLVM intrinsics.
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext), true),
+ "__gxx_personality_v0");
+ Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
+ llvm::Value *llvm_eh_exception =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+
+ // Exception object
+ llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ // Call the selector function.
+ const llvm::PointerType *PtrToInt8Ty
+ = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
+ llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ llvm::Value* SelectorArgs[3] = { Exc, Personality, Null };
+ Builder.CreateCall(llvm_eh_selector, SelectorArgs, SelectorArgs + 3,
+ "selector");
+ Builder.CreateStore(Exc, RethrowPtr);
+
+ // Call __cxa_guard_abort along the exceptional edge.
+ Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
+
+ setInvokeDest(SavedLandingPad);
+
+ // Rethrow the current exception.
+ if (getInvokeDest()) {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
+ getInvokeDest(),
+ Builder.CreateLoad(RethrowPtr));
+ EmitBlock(Cont);
+ } else
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(RethrowPtr));
+
+ Builder.CreateUnreachable();
+ }
+
+ EmitBlock(EndBlock);
+}
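+
+// Illustrative note (not part of the original commit): for a thread-safe
+// local static 'static T t = init();', the blocks emitted above correspond
+// roughly to
+//
+//   if (*(char *)&guard == 0) {             // init.check
+//     if (__cxa_guard_acquire(&guard)) {    // init
+//       try { /* initialize t */ }          // lands in guard.lpad on throw
+//       catch (...) { __cxa_guard_abort(&guard); throw; }
+//       __cxa_guard_release(&guard);
+//     }
+//   }                                       // init.end
+//
+// With -fno-threadsafe-statics the acquire/release pair degenerates to a
+// plain store of 1 into the guard's first byte.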
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
new file mode 100644
index 0000000..ddc1c77
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -0,0 +1,752 @@
+//===--- CGException.cpp - Emit LLVM Code for C++ exceptions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtCXX.h"
+
+#include "llvm/Intrinsics.h"
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
+static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
+ // void *__cxa_allocate_exception(size_t thrown_size);
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ std::vector<const llvm::Type*> Args(1, SizeTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getInt8PtrTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
+}
+
+static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
+ // void __cxa_free_exception(void *thrown_exception);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
+}
+
+static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
+ // void (*dest) (void *));
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(3, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
+}
+
+static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_rethrow();
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
+}
+
+static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
+ // void *__cxa_begin_catch(void *exceptionObject);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
+}
+
+static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
+ // void __cxa_end_catch();
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
+}
+
+static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
+ // void __cxa_call_unexpected(void *thrown_exception);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
+}
+
+llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), Args,
+ false);
+
+ if (CGM.getLangOptions().SjLjExceptions)
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
+}
+
+static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
+ // void __terminate();
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy,
+ CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenModule &CGM) {
+ const char *PersonalityFnName = "__gcc_personality_v0";
+ LangOptions Opts = CGM.getLangOptions();
+ if (Opts.CPlusPlus)
+ PersonalityFnName = "__gxx_personality_v0";
+ else if (Opts.ObjC1) {
+ if (Opts.NeXTRuntime) {
+ if (Opts.ObjCNonFragileABI)
+ PersonalityFnName = "__gcc_personality_v0";
+ } else
+ PersonalityFnName = "__gnu_objc_personality_v0";
+ }
+
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(CGM.getLLVMContext()),
+ true),
+ PersonalityFnName);
+ return llvm::ConstantExpr::getBitCast(Personality, CGM.PtrToInt8Ty);
+}
+
+// Emits an exception expression into the given location. This
+// differs from EmitAnyExprToMem only in that, if a final copy-ctor
+// call is required, an exception within that copy ctor causes
+// std::terminate to be invoked.
+static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
+ llvm::Value *ExnLoc) {
+ // We want to release the allocated exception object if this
+ // expression throws. We do this by pushing an EH-only cleanup
+ // block which, furthermore, deactivates itself after the expression
+ // is complete.
+ llvm::AllocaInst *ShouldFreeVar =
+ CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
+ "should-free-exnobj.var");
+ CGF.InitTempAlloca(ShouldFreeVar,
+ llvm::ConstantInt::getFalse(CGF.getLLVMContext()));
+
+ // A variable holding the exception pointer. This is necessary
+ // because the throw expression does not necessarily dominate the
+ // cleanup, for example if it appears in a conditional expression.
+ llvm::AllocaInst *ExnLocVar =
+ CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
+
+ llvm::BasicBlock *SavedInvokeDest = CGF.getInvokeDest();
+ {
+ CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
+
+ llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
+ "should-free-exnobj");
+ CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
+ CGF.EmitBlock(FreeBB);
+ llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal);
+ CGF.EmitBlock(DoneBB);
+ }
+ llvm::BasicBlock *Cleanup = CGF.getInvokeDest();
+
+ CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
+ ShouldFreeVar);
+
+ // __cxa_allocate_exception returns a void*; we need to cast this
+ // to the appropriate type for the object.
+ const llvm::Type *Ty = CGF.ConvertType(E->getType())->getPointerTo();
+ llvm::Value *TypedExnLoc = CGF.Builder.CreateBitCast(ExnLoc, Ty);
+
+ // FIXME: this isn't quite right! If there's a final unelided call
+ // to a copy constructor, then according to [except.terminate]p1 we
+ // must call std::terminate() if that constructor throws, because
+ // technically that copy occurs after the exception expression is
+ // evaluated but before the exception is caught. But the best way
+ // to handle that is to teach EmitAggExpr to do the final copy
+ // differently if it can't be elided.
+ CGF.EmitAnyExprToMem(E, TypedExnLoc, /*Volatile*/ false);
+
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
+ ShouldFreeVar);
+
+ // Pop the cleanup block if it's still the top of the cleanup stack.
+ // Otherwise, temporaries have been created and our cleanup will get
+ // properly removed in time.
+ // TODO: this is not very resilient.
+ if (CGF.getInvokeDest() == Cleanup)
+ CGF.setInvokeDest(SavedInvokeDest);
+}
+
+// CopyObject - Utility to copy an object. Calls the copy constructor as
+// necessary. N is cast to the right type.
+static void CopyObject(CodeGenFunction &CGF, QualType ObjectType,
+ bool WasPointer, bool WasPointerReference,
+ llvm::Value *E, llvm::Value *N) {
+ // Store the throw exception in the exception object.
+ if (WasPointer || !CGF.hasAggregateLLVMType(ObjectType)) {
+ llvm::Value *Value = E;
+ if (!WasPointer)
+ Value = CGF.Builder.CreateLoad(Value);
+ const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
+ if (WasPointerReference) {
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Value->getType(), "catch.param");
+ CGF.Builder.CreateStore(Value, Tmp);
+ Value = Tmp;
+ ValuePtrTy = Value->getType()->getPointerTo(0);
+ }
+ N = CGF.Builder.CreateBitCast(N, ValuePtrTy);
+ CGF.Builder.CreateStore(Value, N);
+ } else {
+ const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0);
+ const CXXRecordDecl *RD;
+ RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
+ llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty);
+ if (RD->hasTrivialCopyConstructor()) {
+ CGF.EmitAggregateCopy(This, E, ObjectType);
+ } else if (CXXConstructorDecl *CopyCtor
+ = RD->getCopyConstructor(CGF.getContext(), 0)) {
+ llvm::Value *Src = E;
+
+ // Stolen from EmitClassAggrMemberwiseCopy
+ llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
+ Ctor_Complete);
+ CallArgList CallArgs;
+ CallArgs.push_back(std::make_pair(RValue::get(This),
+ CopyCtor->getThisType(CGF.getContext())));
+
+ // Push the Src ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Src),
+ CopyCtor->getParamDecl(0)->getType()));
+
+ const FunctionProtoType *FPT
+ = CopyCtor->getType()->getAs<FunctionProtoType>();
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+ Callee, ReturnValueSlot(), CallArgs, CopyCtor);
+ } else
+ llvm_unreachable("uncopyable object");
+ }
+}
+
+void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
+ if (!E->getSubExpr()) {
+ if (getInvokeDest()) {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ Builder.CreateInvoke(getReThrowFn(*this), Cont, getInvokeDest())
+ ->setDoesNotReturn();
+ EmitBlock(Cont);
+ } else
+ Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ QualType ThrowType = E->getSubExpr()->getType();
+
+ // Now allocate the exception object.
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
+
+ llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
+ llvm::Value *ExceptionPtr =
+ Builder.CreateCall(AllocExceptionFn,
+ llvm::ConstantInt::get(SizeTy, TypeSize),
+ "exception");
+
+ EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
+
+ // Now throw the exception.
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, true);
+
+ // The address of the destructor. If the exception type has a
+ // trivial destructor (or isn't a record), we just pass null.
+ llvm::Constant *Dtor = 0;
+ if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!Record->hasTrivialDestructor()) {
+ CXXDestructorDecl *DtorD = Record->getDestructor(getContext());
+ Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
+ Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
+ }
+ }
+ if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy);
+
+ if (getInvokeDest()) {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ llvm::InvokeInst *ThrowCall =
+ Builder.CreateInvoke3(getThrowFn(*this), Cont, getInvokeDest(),
+ ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ EmitBlock(Cont);
+ } else {
+ llvm::CallInst *ThrowCall =
+ Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ }
+ Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+ // FIXME: For now, emit a dummy basic block because expression emitters in
+ // general are not ready to handle emitting expressions at unreachable
+ // points.
+ EnsureInsertPoint();
+}
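+
+// Illustrative note (not part of the original commit): a 'throw E' thus
+// lowers roughly to
+//   void *exn = __cxa_allocate_exception(sizeof(T));
+//   /* construct E into exn, freeing it if the construction throws */
+//   __cxa_throw(exn, &typeid(T), T-has-nontrivial-dtor ? &dtor : 0);
+// while a bare 'throw;' becomes a call to __cxa_rethrow.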
+
+void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
+ if (!Exceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD == 0)
+ return;
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (Proto == 0)
+ return;
+
+ assert(!Proto->hasAnyExceptionSpec() && "function with parameter pack");
+
+ if (!Proto->hasExceptionSpec())
+ return;
+
+ llvm::Constant *Personality = getPersonalityFn(CGM);
+ llvm::Value *llvm_eh_exception =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+ const llvm::IntegerType *Int8Ty;
+ const llvm::PointerType *PtrToInt8Ty;
+ Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ // C string type. Used in lots of places.
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+ llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+
+ llvm::BasicBlock *PrevLandingPad = getInvokeDest();
+ llvm::BasicBlock *EHSpecHandler = createBasicBlock("ehspec.handler");
+ llvm::BasicBlock *Match = createBasicBlock("match");
+ llvm::BasicBlock *Unwind = 0;
+
+ assert(PrevLandingPad == 0 && "EHSpec has invoke context");
+ (void)PrevLandingPad;
+
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+
+ EmitBranchThroughCleanup(Cont);
+
+ // Emit the statements in the try {} block
+ setInvokeDest(EHSpecHandler);
+
+ EmitBlock(EHSpecHandler);
+ // Exception object
+ llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ SelectorArgs.push_back(Exc);
+ SelectorArgs.push_back(Personality);
+ SelectorArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ Proto->getNumExceptions()+1));
+
+ for (unsigned i = 0; i < Proto->getNumExceptions(); ++i) {
+ QualType Ty = Proto->getExceptionType(i);
+ QualType ExceptType
+ = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
+ SelectorArgs.push_back(EHType);
+ }
+ if (Proto->getNumExceptions())
+ SelectorArgs.push_back(Null);
+
+ // Find which handler was matched.
+ llvm::Value *Selector
+ = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
+ SelectorArgs.end(), "selector");
+ if (Proto->getNumExceptions()) {
+ Unwind = createBasicBlock("Unwind");
+
+ Builder.CreateStore(Exc, RethrowPtr);
+ Builder.CreateCondBr(Builder.CreateICmpSLT(Selector,
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ 0)),
+ Match, Unwind);
+
+ EmitBlock(Match);
+ }
+ Builder.CreateCall(getUnexpectedFn(*this), Exc)->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ if (Proto->getNumExceptions()) {
+ EmitBlock(Unwind);
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(RethrowPtr));
+ Builder.CreateUnreachable();
+ }
+
+ EmitBlock(Cont);
+}
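+
+// Illustrative note (not part of the original commit): for
+// 'void f() throw(A, B)', the selector call above receives the filter
+// length followed by &typeid(A) and &typeid(B); a negative selector value
+// means the thrown type failed the filter, so __cxa_call_unexpected runs,
+// otherwise unwinding simply resumes past this frame.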
+
+void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
+ if (!Exceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD == 0)
+ return;
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (Proto == 0)
+ return;
+
+ if (!Proto->hasExceptionSpec())
+ return;
+
+ setInvokeDest(0);
+}
+
+void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
+ CXXTryStmtInfo Info = EnterCXXTryStmt(S);
+ EmitStmt(S.getTryBlock());
+ ExitCXXTryStmt(S, Info);
+}
+
+CodeGenFunction::CXXTryStmtInfo
+CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S) {
+ CXXTryStmtInfo Info;
+ Info.SavedLandingPad = getInvokeDest();
+ Info.HandlerBlock = createBasicBlock("try.handler");
+ Info.FinallyBlock = createBasicBlock("finally");
+
+ PushCleanupBlock(Info.FinallyBlock);
+ setInvokeDest(Info.HandlerBlock);
+
+ return Info;
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
+ CXXTryStmtInfo TryInfo) {
+ // Pointer to the personality function
+ llvm::Constant *Personality = getPersonalityFn(CGM);
+ llvm::Value *llvm_eh_exception =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+
+ llvm::BasicBlock *PrevLandingPad = TryInfo.SavedLandingPad;
+ llvm::BasicBlock *TryHandler = TryInfo.HandlerBlock;
+ llvm::BasicBlock *FinallyBlock = TryInfo.FinallyBlock;
+ llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end");
+
+ // Jump to end if there is no exception
+ EmitBranchThroughCleanup(FinallyEnd);
+
+ llvm::BasicBlock *TerminateHandler = getTerminateHandler();
+
+ // Emit the handlers
+ EmitBlock(TryHandler);
+
+ const llvm::IntegerType *Int8Ty;
+ const llvm::PointerType *PtrToInt8Ty;
+ Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ // C string type. Used in lots of places.
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+ llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+ llvm::Value *llvm_eh_typeid_for =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+ // Exception object
+ llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ SelectorArgs.push_back(Exc);
+ SelectorArgs.push_back(Personality);
+
+ bool HasCatchAll = false;
+ for (unsigned i = 0; i < S.getNumHandlers(); ++i) {
+ const CXXCatchStmt *C = S.getHandler(i);
+ VarDecl *CatchParam = C->getExceptionDecl();
+ if (CatchParam) {
+ // C++ [except.handle]p3 indicates that top-level cv-qualifiers
+ // are ignored.
+ QualType CaughtType = C->getCaughtType().getNonReferenceType();
+ llvm::Value *EHTypeInfo
+ = CGM.GetAddrOfRTTIDescriptor(CaughtType.getUnqualifiedType(), true);
+ SelectorArgs.push_back(EHTypeInfo);
+ } else {
+ // A null typeinfo pointer indicates a catch-all handler.
+ SelectorArgs.push_back(Null);
+ HasCatchAll = true;
+ }
+ }
+
+ // We use a cleanup unless there was already a catch all.
+ if (!HasCatchAll) {
+ SelectorArgs.push_back(Null);
+ }
+
+ // Find which handler was matched.
+ llvm::Value *Selector
+ = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
+ SelectorArgs.end(), "selector");
+ for (unsigned i = 0; i < S.getNumHandlers(); ++i) {
+ const CXXCatchStmt *C = S.getHandler(i);
+ VarDecl *CatchParam = C->getExceptionDecl();
+ Stmt *CatchBody = C->getHandlerBlock();
+
+ llvm::BasicBlock *Next = 0;
+
+ if (SelectorArgs[i+2] != Null) {
+ llvm::BasicBlock *Match = createBasicBlock("match");
+ Next = createBasicBlock("catch.next");
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ llvm::Value *Id
+ = Builder.CreateCall(llvm_eh_typeid_for,
+ Builder.CreateBitCast(SelectorArgs[i+2],
+ Int8PtrTy));
+ Builder.CreateCondBr(Builder.CreateICmpEQ(Selector, Id),
+ Match, Next);
+ EmitBlock(Match);
+ }
+
+ llvm::BasicBlock *MatchEnd = createBasicBlock("match.end");
+ llvm::BasicBlock *MatchHandler = createBasicBlock("match.handler");
+
+ PushCleanupBlock(MatchEnd);
+ setInvokeDest(MatchHandler);
+
+ llvm::Value *ExcObject = Builder.CreateCall(getBeginCatchFn(*this), Exc);
+
+ {
+ CleanupScope CatchScope(*this);
+ // Bind the catch parameter if it exists.
+ if (CatchParam) {
+ QualType CatchType = CatchParam->getType().getNonReferenceType();
+ setInvokeDest(TerminateHandler);
+ bool WasPointer = true;
+ bool WasPointerReference = false;
+ CatchType = CGM.getContext().getCanonicalType(CatchType);
+ if (CatchType.getTypePtr()->isPointerType()) {
+ if (isa<ReferenceType>(CatchParam->getType()))
+ WasPointerReference = true;
+ } else {
+ if (!isa<ReferenceType>(CatchParam->getType()))
+ WasPointer = false;
+ CatchType = getContext().getPointerType(CatchType);
+ }
+ ExcObject = Builder.CreateBitCast(ExcObject, ConvertType(CatchType));
+ EmitLocalBlockVarDecl(*CatchParam);
+ // FIXME: we need to do this sooner so that the EH region for the
+ // cleanup doesn't start until after the ctor completes, use a decl
+ // init?
+ CopyObject(*this, CatchParam->getType().getNonReferenceType(),
+ WasPointer, WasPointerReference, ExcObject,
+ GetAddrOfLocalVar(CatchParam));
+ setInvokeDest(MatchHandler);
+ }
+
+ EmitStmt(CatchBody);
+ }
+
+ EmitBranchThroughCleanup(FinallyEnd);
+
+ EmitBlock(MatchHandler);
+
+ llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ llvm::Value *Args[] = {
+ Exc, Personality,
+ llvm::ConstantInt::getNullValue(llvm::Type::getInt32Ty(VMContext))
+ };
+ Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
+ Builder.CreateStore(Exc, RethrowPtr);
+ EmitBranchThroughCleanup(FinallyRethrow);
+
+ CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
+
+ EmitBlock(MatchEnd);
+
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ Builder.CreateInvoke(getEndCatchFn(*this),
+ Cont, TerminateHandler,
+ &Args[0], &Args[0]);
+ EmitBlock(Cont);
+ if (Info.SwitchBlock)
+ EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ EmitBlock(Info.EndBlock);
+
+ Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+ Builder.CreateStore(Exc, RethrowPtr);
+ EmitBranchThroughCleanup(FinallyRethrow);
+
+ if (Next)
+ EmitBlock(Next);
+ }
+ if (!HasCatchAll) {
+ Builder.CreateStore(Exc, RethrowPtr);
+ EmitBranchThroughCleanup(FinallyRethrow);
+ }
+
+ CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
+
+ setInvokeDest(PrevLandingPad);
+
+ EmitBlock(FinallyBlock);
+
+ if (Info.SwitchBlock)
+ EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ EmitBlock(Info.EndBlock);
+
+ // Branch around the rethrow code.
+ EmitBranch(FinallyEnd);
+
+ EmitBlock(FinallyRethrow);
+ // FIXME: Eventually we can chain the handlers together and just do a call
+ // here.
+ if (getInvokeDest()) {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
+ getInvokeDest(),
+ Builder.CreateLoad(RethrowPtr));
+ EmitBlock(Cont);
+ } else
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(RethrowPtr));
+
+ Builder.CreateUnreachable();
+
+ EmitBlock(FinallyEnd);
+}
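+
+// Illustrative note (not part of the original commit): the dispatch built
+// above works roughly as follows. The landing pad calls llvm.eh.exception
+// and llvm.eh.selector with each handler's RTTI (null for 'catch (...)');
+// each typed handler compares llvm.eh.typeid.for of its RTTI against the
+// selector, and on a match calls __cxa_begin_catch, copies the exception
+// into the catch parameter, runs the handler body, and invokes
+// __cxa_end_catch; if nothing matches and there is no catch-all, the
+// exception is rethrown via the finally.throw path.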
+
+CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
+ CGF.setInvokeDest(PreviousInvokeDest);
+
+ llvm::BasicBlock *EndOfCleanup = CGF.Builder.GetInsertBlock();
+
+ // Jump to the beginning of the cleanup.
+ CGF.Builder.SetInsertPoint(CleanupHandler, CleanupHandler->begin());
+
+ // The libstdc++ personality function.
+ // TODO: generalize to work with other libraries.
+ llvm::Constant *Personality = getPersonalityFn(CGF.CGM);
+
+ // %exception = call i8* @llvm.eh.exception()
+ // Magic intrinsic which gives us a handle to the caught
+ // exception.
+ llvm::Value *llvm_eh_exception =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+
+ llvm::Constant *Null = llvm::ConstantPointerNull::get(CGF.PtrToInt8Ty);
+
+ // %ignored = call i32 @llvm.eh.selector(i8* %exception,
+ // i8* @__gxx_personality_v0,
+ // i8* null)
+ // Magic intrinsic which tells LLVM that this invoke landing pad is
+ // just a cleanup block.
+ llvm::Value *Args[] = { Exc, Personality, Null };
+ llvm::Value *llvm_eh_selector =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+ CGF.Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
+
+ // And then we fall through into the code that the user put there.
+ // Jump back to the end of the cleanup.
+ CGF.Builder.SetInsertPoint(EndOfCleanup);
+
+ // Rethrow the exception.
+ if (CGF.getInvokeDest()) {
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(CGF.getUnwindResumeOrRethrowFn(), Cont,
+ CGF.getInvokeDest(), Exc);
+ CGF.EmitBlock(Cont);
+ } else
+ CGF.Builder.CreateCall(CGF.getUnwindResumeOrRethrowFn(), Exc);
+ CGF.Builder.CreateUnreachable();
+
+ // Resume inserting where we started, but put the new cleanup
+ // handler in place.
+ if (PreviousInsertionBlock)
+ CGF.Builder.SetInsertPoint(PreviousInsertionBlock);
+ else
+ CGF.Builder.ClearInsertionPoint();
+
+ if (CGF.Exceptions)
+ CGF.setInvokeDest(CleanupHandler);
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
+ if (TerminateHandler)
+ return TerminateHandler;
+
+ // We don't want to change anything at the current location, so
+ // save it aside and clear the insert point.
+ llvm::BasicBlock *SavedInsertBlock = Builder.GetInsertBlock();
+ llvm::BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
+ Builder.ClearInsertionPoint();
+
+ llvm::Constant *Personality = getPersonalityFn(CGM);
+ llvm::Value *llvm_eh_exception =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+
+ // Set up terminate handler
+ TerminateHandler = createBasicBlock("terminate.handler");
+ EmitBlock(TerminateHandler);
+ llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ llvm::Value *Args[] = {
+ Exc, Personality,
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1)
+ };
+ Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
+ llvm::CallInst *TerminateCall =
+ Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.SetInsertPoint(SavedInsertBlock, SavedInsertPoint);
+
+ return TerminateHandler;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
new file mode 100644
index 0000000..d67618b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -0,0 +1,2084 @@
+//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCall.h"
+#include "CGRecordLayout.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Intrinsics.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===--------------------------------------------------------------------===//
+// Miscellaneous Helper Methods
+//===--------------------------------------------------------------------===//
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
+ const llvm::Twine &Name) {
+ if (!Builder.isNamePreserving())
+ return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
+ return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
+}
+
+void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
+ llvm::Value *Init) {
+ llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
+ llvm::BasicBlock *Block = AllocaInsertPt->getParent();
+ Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
+}
+
+llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
+ const llvm::Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
+
+llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty,
+ const llvm::Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
+
+/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+ QualType BoolTy = getContext().BoolTy;
+ if (E->getType()->isMemberFunctionPointerType()) {
+ LValue LV = EmitAggExprToLValue(E);
+
+ // Get the pointer.
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(LV.getAddress(), 0,
+ "src.ptr");
+ FuncPtr = Builder.CreateLoad(FuncPtr);
+
+ llvm::Value *IsNotNull =
+ Builder.CreateICmpNE(FuncPtr,
+ llvm::Constant::getNullValue(FuncPtr->getType()),
+ "tobool");
+
+ return IsNotNull;
+ }
+ if (!E->getType()->isAnyComplexType())
+ return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
+
+ return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
+}
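+
+// Illustrative note (not part of the original commit): the member function
+// pointer case above means that for 'void (S::*p)();', 'if (p)' tests only
+// the function-pointer word of the {ptr, adj} pair against null.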
+
+/// EmitAnyExpr - Emit code to compute the specified expression which can have
+/// any type. The result is returned as an RValue struct. If this is an
+/// aggregate expression, the aggloc/agglocvolatile arguments indicate where the
+/// result should be returned.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
+ bool IsAggLocVolatile, bool IgnoreResult,
+ bool IsInitializer) {
+ if (!hasAggregateLLVMType(E->getType()))
+ return RValue::get(EmitScalarExpr(E, IgnoreResult));
+ else if (E->getType()->isAnyComplexType())
+ return RValue::getComplex(EmitComplexExpr(E, false, false,
+ IgnoreResult, IgnoreResult));
+
+ EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
+ return RValue::getAggregate(AggLoc, IsAggLocVolatile);
+}
+
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
+/// always be accessible even if no aggregate location is provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
+ bool IsAggLocVolatile,
+ bool IsInitializer) {
+ llvm::Value *AggLoc = 0;
+
+ if (hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ AggLoc = CreateMemTemp(E->getType(), "agg.tmp");
+ return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
+ IsInitializer);
+}
+
+/// EmitAnyExprToMem - Evaluate an expression into a given memory
+/// location.
+void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
+ llvm::Value *Location,
+ bool IsLocationVolatile,
+ bool IsInit) {
+ if (E->getType()->isComplexType())
+ EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
+ else if (hasAggregateLLVMType(E->getType()))
+ EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit);
+ else {
+ RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
+ LValue LV = LValue::MakeAddr(Location, MakeQualifiers(E->getType()));
+ EmitStoreThroughLValue(RV, LV, E->getType());
+ }
+}
+
+/// \brief An adjustment to be made to the temporary created when emitting a
+/// reference binding, which accesses a particular subobject of that temporary.
+struct SubobjectAdjustment {
+ enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+
+ union {
+ struct {
+ const CXXBaseSpecifierArray *BasePath;
+ const CXXRecordDecl *DerivedClass;
+ } DerivedToBase;
+
+ struct {
+ FieldDecl *Field;
+ unsigned CVRQualifiers;
+ } Field;
+ };
+
+ SubobjectAdjustment(const CXXBaseSpecifierArray *BasePath,
+ const CXXRecordDecl *DerivedClass)
+ : Kind(DerivedToBaseAdjustment)
+ {
+ DerivedToBase.BasePath = BasePath;
+ DerivedToBase.DerivedClass = DerivedClass;
+ }
+
+ SubobjectAdjustment(FieldDecl *Field, unsigned CVRQualifiers)
+ : Kind(FieldAdjustment)
+ {
+ this->Field.Field = Field;
+ this->Field.CVRQualifiers = CVRQualifiers;
+ }
+};
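+
+// A hedged example of where these adjustments arise (illustrative source,
+// not from the tree): binding a reference to a subobject of a temporary,
+// where EmitReferenceBindingToExpr below must step from the materialized
+// temporary down to the base class or member being bound:
+//
+//   struct Base { int x; };
+//   struct Derived : Base { };
+//   const Base &b = Derived();   // DerivedToBaseAdjustment
+//   const int  &i = Derived().x; // FieldAdjustment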
+
+RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+ bool IsInitializer) {
+ bool ShouldDestroyTemporaries = false;
+ unsigned OldNumLiveTemporaries = 0;
+
+ if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
+ E = DAE->getExpr();
+
+ if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
+ ShouldDestroyTemporaries = true;
+
+ // Keep track of the current cleanup stack depth.
+ OldNumLiveTemporaries = LiveTemporaries.size();
+
+ E = TE->getSubExpr();
+ }
+
+ RValue Val;
+ if (E->isLvalue(getContext()) == Expr::LV_Valid) {
+ // Emit the expr as an lvalue.
+ LValue LV = EmitLValue(E);
+ if (LV.isSimple()) {
+ if (ShouldDestroyTemporaries) {
+ // Pop temporaries.
+ while (LiveTemporaries.size() > OldNumLiveTemporaries)
+ PopCXXTemporary();
+ }
+
+ return RValue::get(LV.getAddress());
+ }
+
+ Val = EmitLoadOfLValue(LV, E->getType());
+
+ if (ShouldDestroyTemporaries) {
+ // Pop temporaries.
+ while (LiveTemporaries.size() > OldNumLiveTemporaries)
+ PopCXXTemporary();
+ }
+ } else {
+ QualType ResultTy = E->getType();
+
+ llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
+ do {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if ((CE->getCastKind() == CastExpr::CK_DerivedToBase ||
+ CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase) &&
+ E->getType()->isRecordType()) {
+ E = CE->getSubExpr();
+ CXXRecordDecl *Derived
+ = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ Adjustments.push_back(SubobjectAdjustment(&CE->getBasePath(),
+ Derived));
+ continue;
+ }
+
+ if (CE->getCastKind() == CastExpr::CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (ME->getBase()->isLvalue(getContext()) != Expr::LV_Valid &&
+ ME->getBase()->getType()->isRecordType()) {
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ E = ME->getBase();
+ Adjustments.push_back(SubobjectAdjustment(Field,
+ E->getType().getCVRQualifiers()));
+ continue;
+ }
+ }
+ }
+
+ // Nothing changed.
+ break;
+ } while (true);
+
+ Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
+ IsInitializer);
+
+ if (ShouldDestroyTemporaries) {
+ // Pop temporaries.
+ while (LiveTemporaries.size() > OldNumLiveTemporaries)
+ PopCXXTemporary();
+ }
+
+ if (IsInitializer) {
+ // We might have to destroy the temporary variable.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->hasTrivialDestructor()) {
+ const CXXDestructorDecl *Dtor =
+ ClassDecl->getDestructor(getContext());
+
+ {
+ DelayedCleanupBlock Scope(*this);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false,
+ Val.getAggregateAddr());
+
+ // Make sure to jump to the exit block.
+ EmitBranch(Scope.getCleanupExitBlock());
+ }
+ if (Exceptions) {
+ EHCleanupBlock Cleanup(*this);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false,
+ Val.getAggregateAddr());
+ }
+ }
+ }
+ }
+ }
+
+    // Check if we need to perform derived-to-base casts and/or field accesses
+    // to get from the temporary object we created (and, potentially, for which
+    // we extended the lifetime) to the subobject we're binding the reference
+    // to.
+ if (!Adjustments.empty()) {
+ llvm::Value *Object = Val.getAggregateAddr();
+ for (unsigned I = Adjustments.size(); I != 0; --I) {
+ SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ switch (Adjustment.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Object = GetAddressOfBaseClass(Object,
+ Adjustment.DerivedToBase.DerivedClass,
+ *Adjustment.DerivedToBase.BasePath,
+ /*NullCheckValue=*/false);
+ break;
+
+ case SubobjectAdjustment::FieldAdjustment: {
+ unsigned CVR = Adjustment.Field.CVRQualifiers;
+ LValue LV = EmitLValueForField(Object, Adjustment.Field.Field, CVR);
+ if (LV.isSimple()) {
+ Object = LV.getAddress();
+ break;
+ }
+
+ // For non-simple lvalues, we actually have to create a copy of
+ // the object we're binding to.
+ QualType T = Adjustment.Field.Field->getType().getNonReferenceType()
+ .getUnqualifiedType();
+ Object = CreateTempAlloca(ConvertType(T), "lv");
+ EmitStoreThroughLValue(EmitLoadOfLValue(LV, T),
+ LValue::MakeAddr(Object,
+ Qualifiers::fromCVRMask(CVR)),
+ T);
+ break;
+ }
+ }
+ }
+
+ const llvm::Type *ResultPtrTy
+ = llvm::PointerType::get(ConvertType(ResultTy), 0);
+ Object = Builder.CreateBitCast(Object, ResultPtrTy, "temp");
+ return RValue::get(Object);
+ }
+ }
+
+ if (Val.isAggregate()) {
+ Val = RValue::get(Val.getAggregateAddr());
+ } else {
+ // Create a temporary variable that we can bind the reference to.
+ llvm::Value *Temp = CreateMemTemp(E->getType(), "reftmp");
+ if (Val.isScalar())
+ EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
+ else
+ StoreComplexToAddr(Val.getComplexVal(), Temp, false);
+ Val = RValue::get(Temp);
+ }
+
+ return Val;
+}
+
+
+/// getAccessedFieldNo - Given an encoded value and a result number, return the
+/// input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+ const llvm::Constant *Elts) {
+ if (isa<llvm::ConstantAggregateZero>(Elts))
+ return 0;
+
+ return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
+}
+
+void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
+ if (!CatchUndefined)
+ return;
+
+ const llvm::Type *Size_tTy
+ = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
+ Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &Size_tTy, 1);
+ const llvm::IntegerType *Int1Ty = llvm::IntegerType::get(VMContext, 1);
+
+ // In time, people may want to control this and use a 1 here.
+ llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
+ llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
+ llvm::BasicBlock *Cont = createBasicBlock();
+ llvm::BasicBlock *Check = createBasicBlock();
+ llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL);
+ Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
+
+ EmitBlock(Check);
+ Builder.CreateCondBr(Builder.CreateICmpUGE(C,
+ llvm::ConstantInt::get(Size_tTy, Size)),
+ Cont, getTrapBB());
+ EmitBlock(Cont);
+}
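+
+// Rough sketch of what the check above catches (assuming CatchUndefined is
+// enabled; the source below is illustrative only):
+//
+//   char buf[2];
+//   *(int *)buf = 0;  // 4-byte access into a 2-byte object
+//
+// llvm.objectsize returns 2 for the address, the unsigned "object size >=
+// access size" comparison fails, and control branches to getTrapBB().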
+
+
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ QualType ValTy = E->getSubExpr()->getType();
+ llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal();
+
+ int AmountVal = isInc ? 1 : -1;
+
+ if (ValTy->isPointerType() &&
+ ValTy->getAs<PointerType>()->isVariableArrayType()) {
+ // The amount of the addition/subtraction needs to account for the VLA size
+ ErrorUnsupported(E, "VLA pointer inc/dec");
+ }
+
+ llvm::Value *NextVal;
+ if (const llvm::PointerType *PT =
+ dyn_cast<llvm::PointerType>(InVal->getType())) {
+ llvm::Constant *Inc =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
+ if (!isa<llvm::FunctionType>(PT->getElementType())) {
+ QualType PTEE = ValTy->getPointeeType();
+ if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ int size = getContext().getTypeSize(OIT) / 8;
+ if (!isInc)
+ size = -size;
+ Inc = llvm::ConstantInt::get(Inc->getType(), size);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ InVal = Builder.CreateBitCast(InVal, i8Ty);
+ NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
+ llvm::Value *lhs = LV.getAddress();
+ lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
+ LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy));
+ } else
+ NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
+ } else {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
+ NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
+ NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ }
+ } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
+    // Bool++ is an interesting case: due to promotion rules, we get
+    //   Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
+    //   Bool = ((int)Bool+1) != 0
+    // An interesting aspect of this is that the increment always yields true.
+    // Decrement does not have this property.
+ NextVal = llvm::ConstantInt::getTrue(VMContext);
+ } else if (isa<llvm::IntegerType>(InVal->getType())) {
+ NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
+
+ // Signed integer overflow is undefined behavior.
+ if (ValTy->isSignedIntegerType())
+ NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ else
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ } else {
+ // Add the inc/dec to the real part.
+ if (InVal->getType()->isFloatTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(AmountVal)));
+ else if (InVal->getType()->isDoubleTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(AmountVal)));
+ else {
+ llvm::APFloat F(static_cast<float>(AmountVal));
+ bool ignored;
+ F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ NextVal = llvm::ConstantFP::get(VMContext, F);
+ }
+ NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitField())
+ EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
+ else
+ EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? NextVal : InVal;
+}
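+
+// Grounding example for the Bool++ case above (C99 _Bool semantics):
+//
+//   _Bool b = 0;
+//   b++;  // b = ((int)b + 1) != 0  ->  b == 1
+//   b++;  // still 1: incrementing a _Bool always yields true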
+
+
+CodeGenFunction::ComplexPairTy CodeGenFunction::
+EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+
+ llvm::Value *NextVal;
+ if (isa<llvm::IntegerType>(InVal.first->getType())) {
+ uint64_t AmountVal = isInc ? 1 : -1;
+ NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ } else {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
+ if (!isInc)
+ FVal.changeSign();
+ NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ }
+
+ ComplexPairTy IncVal(NextVal, InVal.second);
+
+ // Store the updated result through the lvalue.
+ StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? IncVal : InVal;
+}
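+
+// Sketch of the semantics implemented above: ++/-- on a complex value is a
+// GNU extension that adjusts only the real part, e.g. (illustrative):
+//
+//   _Complex double z = 1.0;  // z == 1.0 + 0.0i
+//   z++;                      // z == 2.0 + 0.0i; imaginary part unchanged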
+
+
+//===----------------------------------------------------------------------===//
+// LValue Expression Emission
+//===----------------------------------------------------------------------===//
+
+RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
+ if (Ty->isVoidType())
+ return RValue::get(0);
+
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ const llvm::Type *EltTy = ConvertType(CTy->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return RValue::getComplex(std::make_pair(U, U));
+ }
+
+ if (hasAggregateLLVMType(Ty)) {
+ const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
+ return RValue::getAggregate(llvm::UndefValue::get(LTy));
+ }
+
+ return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
+}
+
+RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ return GetUndefRValue(E->getType());
+}
+
+LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
+ return LValue::MakeAddr(llvm::UndefValue::get(Ty),
+ MakeQualifiers(E->getType()));
+}
+
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
+ LValue LV = EmitLValue(E);
+ if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
+ EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
+ return LV;
+}
+
+/// EmitLValue - Emit code to compute a designator that specifies the location
+/// of the expression.
+///
+/// This can return one of two things: a simple address or a bitfield reference.
+/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
+/// an LLVM pointer type.
+///
+/// If this returns a bitfield reference, nothing about the pointee type of the
+/// LLVM value is known: For example, it may not be a pointer to an integer.
+///
+/// If this returns a normal address, and if the lvalue's C type is fixed size,
+/// this method guarantees that the returned pointer type will point to an LLVM
+/// type of the same size of the lvalue's type. If the lvalue has a variable
+/// length type, this is not possible.
+///
+LValue CodeGenFunction::EmitLValue(const Expr *E) {
+ switch (E->getStmtClass()) {
+ default: return EmitUnsupportedLValue(E, "l-value expression");
+
+ case Expr::ObjCIsaExprClass:
+ return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
+ case Expr::BinaryOperatorClass:
+ return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+ case Expr::CompoundAssignOperatorClass:
+ return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
+ case Expr::CallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ return EmitCallExprLValue(cast<CallExpr>(E));
+ case Expr::VAArgExprClass:
+ return EmitVAArgExprLValue(cast<VAArgExpr>(E));
+ case Expr::DeclRefExprClass:
+ return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+  case Expr::ParenExprClass:
+    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::PredefinedExprClass:
+ return EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ case Expr::StringLiteralClass:
+ return EmitStringLiteralLValue(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
+
+ case Expr::BlockDeclRefExprClass:
+ return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
+
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass:
+ return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
+ case Expr::CXXBindTemporaryExprClass:
+ return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+ case Expr::CXXExprWithTemporariesClass:
+ return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
+ case Expr::CXXZeroInitValueExprClass:
+ return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
+ case Expr::CXXDefaultArgExprClass:
+ return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
+ case Expr::CXXTypeidExprClass:
+ return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
+
+ case Expr::ObjCMessageExprClass:
+ return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
+ case Expr::ObjCIvarRefExprClass:
+ return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
+ case Expr::ObjCPropertyRefExprClass:
+ return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
+ case Expr::ObjCSuperExprClass:
+ return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));
+
+ case Expr::StmtExprClass:
+ return EmitStmtExprLValue(cast<StmtExpr>(E));
+ case Expr::UnaryOperatorClass:
+ return EmitUnaryOpLValue(cast<UnaryOperator>(E));
+ case Expr::ArraySubscriptExprClass:
+ return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::ExtVectorElementExprClass:
+ return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+ case Expr::MemberExprClass:
+ return EmitMemberExpr(cast<MemberExpr>(E));
+ case Expr::CompoundLiteralExprClass:
+ return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+ case Expr::ConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
+ case Expr::ChooseExprClass:
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ return EmitCastLValue(cast<CastExpr>(E));
+ }
+}
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ QualType Ty) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
+ if (Volatile)
+ Load->setVolatile(true);
+
+ // Bool can have different representation in memory than in registers.
+ llvm::Value *V = Load;
+ if (Ty->isBooleanType())
+ if (V->getType() != llvm::Type::getInt1Ty(VMContext))
+ V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");
+
+ return V;
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, QualType Ty) {
+
+ if (Ty->isBooleanType()) {
+ // Bool can have different representation in memory than in registers.
+ const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
+ Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
+ }
+ Builder.CreateStore(Value, Addr, Volatile);
+}
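+
+// Why the bool special-casing in the two functions above: bool is stored in
+// memory as i8 but held in registers as i1, so loads truncate and stores
+// zero-extend. A minimal sketch (illustrative):
+//
+//   bool load(bool *p) { return *p; }    // load i8, trunc to i1
+//   void store(bool *p) { *p = true; }   // zext i1 to i8, store i8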
+
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
+/// method emits the address of the lvalue, then loads the result as an rvalue,
+/// returning the rvalue.
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
+ if (LV.isObjCWeak()) {
+ // load of a __weak object.
+ llvm::Value *AddrWeakObj = LV.getAddress();
+ return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ AddrWeakObj));
+ }
+
+ if (LV.isSimple()) {
+ llvm::Value *Ptr = LV.getAddress();
+ const llvm::Type *EltTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+
+ // Simple scalar l-value.
+ //
+ // FIXME: We shouldn't have to use isSingleValueType here.
+ if (EltTy->isSingleValueType())
+ return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
+ ExprType));
+
+ assert(ExprType->isFunctionType() && "Unknown scalar value");
+ return RValue::get(Ptr);
+ }
+
+ if (LV.isVectorElt()) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified(), "tmp");
+ return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
+ "vecext"));
+ }
+
+ // If this is a reference to a subset of the elements of a vector, either
+ // shuffle the input or extract/insert them as appropriate.
+ if (LV.isExtVectorElt())
+ return EmitLoadOfExtVectorElementLValue(LV, ExprType);
+
+ if (LV.isBitField())
+ return EmitLoadOfBitfieldLValue(LV, ExprType);
+
+ if (LV.isPropertyRef())
+ return EmitLoadOfPropertyRefLValue(LV, ExprType);
+
+ assert(LV.isKVCRef() && "Unknown LValue type!");
+ return EmitLoadOfKVCRefLValue(LV, ExprType);
+}
+
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
+ QualType ExprType) {
+ const CGBitFieldInfo &Info = LV.getBitFieldInfo();
+
+ // Get the output type.
+ const llvm::Type *ResLTy = ConvertType(ExprType);
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+
+ // Compute the result as an OR of all of the individual component accesses.
+ llvm::Value *Res = 0;
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Get the field pointer.
+ llvm::Value *Ptr = LV.getBitFieldBaseAddr();
+
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
+
+ // Offset by the byte offset, if used.
+ if (AI.FieldByteOffset) {
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
+ }
+
+ // Cast to the access type.
+ const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
+ ExprType.getAddressSpace());
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Perform the load.
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Load->setAlignment(AI.AccessAlignment);
+
+ // Shift out unused low bits and mask out unused high bits.
+ llvm::Value *Val = Load;
+ if (AI.FieldBitStart)
+ Val = Builder.CreateLShr(Load, AI.FieldBitStart);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
+ AI.TargetBitWidth),
+ "bf.clear");
+
+ // Extend or truncate to the target size.
+ if (AI.AccessWidth < ResSizeInBits)
+ Val = Builder.CreateZExt(Val, ResLTy);
+ else if (AI.AccessWidth > ResSizeInBits)
+ Val = Builder.CreateTrunc(Val, ResLTy);
+
+ // Shift into place, and OR into the result.
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateShl(Val, AI.TargetBitOffset);
+ Res = Res ? Builder.CreateOr(Res, Val) : Val;
+ }
+
+ // If the bit-field is signed, perform the sign-extension.
+ //
+ // FIXME: This can easily be folded into the load of the high bits, which
+ // could also eliminate the mask of high bits in some situations.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
+ ExtraBits, "bf.val.sext");
+ }
+
+ return RValue::get(Res);
+}
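+
+// Worked example for the load above (hypothetical layout, for illustration):
+// given
+//
+//   struct S { unsigned a : 3; unsigned b : 7; };
+//
+// a single access wide enough for both fields would read s.b by shifting
+// out the 3 low bits of 'a' (FieldBitStart), masking down to 7 bits
+// (TargetBitWidth), and zero-extending to the i32 result; a signed
+// bit-field would additionally take the shl/ashr sign-extension path.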
+
+RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
+ QualType ExprType) {
+ return EmitObjCPropertyGet(LV.getPropertyRefExpr());
+}
+
+RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
+ QualType ExprType) {
+ return EmitObjCPropertyGet(LV.getKVCRefExpr());
+}
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
+ QualType ExprType) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified(), "tmp");
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+
+ // If the result of the expression is a non-vector type, we must be extracting
+ // a single element. Just codegen as an extractelement.
+ const VectorType *ExprVT = ExprType->getAs<VectorType>();
+ if (!ExprVT) {
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), InIdx);
+ return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
+ }
+
+  // Always use a shuffle vector to try to retain the original program
+  // structure.
+ unsigned NumResultElts = ExprVT->getNumElements();
+
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumResultElts; ++i) {
+ unsigned InIdx = getAccessedFieldNo(i, Elts);
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), InIdx));
+ }
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(Vec,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV, "tmp");
+ return RValue::get(Vec);
+}
+
+
+
+/// EmitStoreThroughLValue - Store the specified rvalue into the specified
+/// lvalue, where both are guaranteed to have the same type, and that type
+/// is 'Ty'.
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+ QualType Ty) {
+ if (!Dst.isSimple()) {
+ if (Dst.isVectorElt()) {
+ // Read/modify/write the vector, inserting the new element.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified(), "tmp");
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getVectorIdx(), "vecins");
+ Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
+ return;
+ }
+
+ // If this is an update of extended vector elements, insert them as
+ // appropriate.
+ if (Dst.isExtVectorElt())
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);
+
+ if (Dst.isBitField())
+ return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
+
+ if (Dst.isPropertyRef())
+ return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
+
+ assert(Dst.isKVCRef() && "Unknown LValue type");
+ return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
+ }
+
+ if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+    // Store into a __weak object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+    // Store into a __strong object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ if (Dst.isObjCIvar()) {
+ assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
+ const llvm::Type *ResultType = ConvertType(getContext().LongTy);
+ llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
+ llvm::Value *dst = RHS;
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ llvm::Value *LHS =
+ Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
+ llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
+ BytesBetween);
+ } else if (Dst.isGlobalObjCRef())
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
+ else
+ CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ assert(Src.isScalar() && "Can't emit an agg store with this method");
+ EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
+ Dst.isVolatileQualified(), Ty);
+}
+
+void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ QualType Ty,
+ llvm::Value **Result) {
+ const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
+
+ // Get the output type.
+ const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+
+ // Get the source value, truncated to the width of the bit-field.
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (Ty->isBooleanType())
+ SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
+
+ SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ Info.getSize()),
+ "bf.value");
+
+ // Return the new value of the bit-field, if requested.
+ if (Result) {
+ // Cast back to the proper type for result.
+ const llvm::Type *SrcTy = Src.getScalarVal()->getType();
+ llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
+ "bf.reload.val");
+
+ // Sign extend if necessary.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
+ ExtraBits, "bf.reload.sext");
+ }
+
+ *Result = ReloadVal;
+ }
+
+ // Iterate over the components, writing each piece to memory.
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Get the field pointer.
+ llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
+
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
+
+ // Offset by the byte offset, if used.
+ if (AI.FieldByteOffset) {
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
+ }
+
+ // Cast to the access type.
+ const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
+ Ty.getAddressSpace());
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Extract the piece of the bit-field value to write in this access, limited
+ // to the values that are part of this access.
+ llvm::Value *Val = SrcVal;
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ AI.TargetBitWidth));
+
+ // Extend or truncate to the access size.
+ const llvm::Type *AccessLTy =
+ llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
+ if (ResSizeInBits < AI.AccessWidth)
+ Val = Builder.CreateZExt(Val, AccessLTy);
+ else if (ResSizeInBits > AI.AccessWidth)
+ Val = Builder.CreateTrunc(Val, AccessLTy);
+
+ // Shift into the position in memory.
+ if (AI.FieldBitStart)
+ Val = Builder.CreateShl(Val, AI.FieldBitStart);
+
+ // If necessary, load and OR in bits that are outside of the bit-field.
+ if (AI.TargetBitWidth != AI.AccessWidth) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Load->setAlignment(AI.AccessAlignment);
+
+ // Compute the mask for zeroing the bits that are part of the bit-field.
+ llvm::APInt InvMask =
+ ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
+ AI.FieldBitStart + AI.TargetBitWidth);
+
+ // Apply the mask and OR in to the value to write.
+ Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
+ }
+
+ // Write the value.
+ llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
+ Dst.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Store->setAlignment(AI.AccessAlignment);
+ }
+}
+
+void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
+}
+
+void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
+}
+
+void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ // This access turns into a read/modify/write of the vector. Load the input
+ // value now.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified(), "tmp");
+ const llvm::Constant *Elts = Dst.getExtVectorElts();
+
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (const VectorType *VTy = Ty->getAs<VectorType>()) {
+ unsigned NumSrcElts = VTy->getNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
+ if (NumDstElts == NumSrcElts) {
+      // Use a shuffle vector when the source and destination have the same
+      // number of elements; the access mask is inverted here, since it is
+      // applied on the side being stored.
+ llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ unsigned InIdx = getAccessedFieldNo(i, Elts);
+ Mask[InIdx] = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), i);
+ }
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV, "tmp");
+ } else if (NumDstElts > NumSrcElts) {
+      // Extend the source vector to the same length and then shuffle it
+      // into the destination.
+ // FIXME: since we're shuffling with undef, can we just use the indices
+ // into that? This could be simpler.
+ llvm::SmallVector<llvm::Constant*, 4> ExtMask;
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ unsigned i;
+ for (i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+ for (; i != NumDstElts; ++i)
+ ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
+ ExtMask.size());
+ llvm::Value *ExtSrcVal =
+ Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(SrcVal->getType()),
+ ExtMaskV, "tmp");
+      // Build an identity shuffle mask.
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumDstElts; ++i)
+ Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+
+      // Modify the mask entries for the elements that get shuffled in.
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ unsigned Idx = getAccessedFieldNo(i, Elts);
+ Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
+ }
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
+ } else {
+ // We should never shorten the vector
+ assert(0 && "unexpected shorten vector length");
+ }
+ } else {
+ // If the Src is a scalar (not a vector) it must be updating one element.
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
+ Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
+ }
+
+ Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
+}
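+
+// Example of the read/modify/write above using clang's ext_vector_type
+// swizzles (a sketch; the typedef names are illustrative):
+//
+//   typedef float float4 __attribute__((ext_vector_type(4)));
+//   typedef float float2 __attribute__((ext_vector_type(2)));
+//   void set_xy(float4 *v, float2 w) {
+//     v->xy = w;  // load *v, shuffle w's lanes into positions 0 and 1,
+//   }             // then store the whole vector back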
+
+// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
+// generating the write-barrier API. It is currently a global, an ivar,
+// or neither.
+static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
+ LValue &LV) {
+ if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return;
+
+ if (isa<ObjCIvarRefExpr>(E)) {
+ LV.SetObjCIvar(LV, true);
+ ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
+ LV.setBaseIvarExp(Exp->getBase());
+ LV.SetObjCArray(LV, E->getType()->isArrayType());
+ return;
+ }
+
+ if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
+ if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
+ VD->isFileVarDecl())
+ LV.SetGlobalObjCRef(LV, true);
+ }
+ LV.SetObjCArray(LV, E->getType()->isArrayType());
+ return;
+ }
+
+ if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
+ if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ if (LV.isObjCIvar()) {
+      // If the cast is to a structure pointer, follow gcc's behavior and make
+      // it a non-ivar write-barrier.
+ QualType ExpTy = E->getType();
+ if (ExpTy->isPointerType())
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType())
+ LV.SetObjCIvar(LV, false);
+ }
+ return;
+ }
+ if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
+ if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
+ if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+    if (LV.isObjCIvar() && !LV.isObjCArray())
+      // Using array syntax to assign to what an ivar points to is not the
+      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
+      LV.SetObjCIvar(LV, false);
+    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
+      // Using array syntax to assign to what a global points to is not the
+      // same as assigning to the global itself. {id *G;} G[i] = 0;
+      LV.SetGlobalObjCRef(LV, false);
+ return;
+ }
+
+ if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+    // We don't know if the member is an ivar, but this flag is looked at
+    // only in the context of LV.isObjCIvar().
+ LV.SetObjCArray(LV, E->getType()->isArrayType());
+ return;
+ }
+}
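+
+// In GC mode the classification above selects the runtime write barrier to
+// emit on assignment; roughly (assuming the standard ObjC GC entry points):
+//
+//   global/static object pointer -> objc_assign_global
+//   instance variable            -> objc_assign_ivar
+//   __weak l-value               -> objc_assign_weak
+//   anything else                -> objc_assign_strongCast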
+
+static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const VarDecl *VD) {
+ assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
+ "Var decl must have external storage or be a file var decl!");
+
+ llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+ if (VD->getType()->isReferenceType())
+ V = CGF.Builder.CreateLoad(V, "tmp");
+ LValue LV = LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
+ setObjCGCLValueClass(CGF.getContext(), E, LV);
+ return LV;
+}
+
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const FunctionDecl *FD) {
+ llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD);
+ if (!FD->hasPrototype()) {
+ if (const FunctionProtoType *Proto =
+ FD->getType()->getAs<FunctionProtoType>()) {
+ // Ugly case: for a K&R-style definition, the type of the definition
+ // isn't the same as the type of a use. Correct for this with a
+ // bitcast.
+ QualType NoProtoType =
+ CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
+ NoProtoType = CGF.getContext().getPointerType(NoProtoType);
+ V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
+ }
+ }
+ return LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
+}
+
+LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
+ const NamedDecl *ND = E->getDecl();
+
+ if (ND->hasAttr<WeakRefAttr>()) {
+ const ValueDecl* VD = cast<ValueDecl>(ND);
+ llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
+
+ Qualifiers Quals = MakeQualifiers(E->getType());
+ LValue LV = LValue::MakeAddr(Aliasee, Quals);
+
+ return LV;
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+
+ // Check if this is a global variable.
+ if (VD->hasExternalStorage() || VD->isFileVarDecl())
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
+
+ llvm::Value *V = LocalDeclMap[VD];
+ if (!V && getContext().getLangOptions().CPlusPlus &&
+ VD->isStaticLocal())
+ V = CGM.getStaticLocalDeclAddress(VD);
+ assert(V && "DeclRefExpr not entered in LocalDeclMap?");
+
+ Qualifiers Quals = MakeQualifiers(E->getType());
+    // Local variables do not get their GC attribute set.
+    // What about local statics?
+ if (NonGCable) Quals.removeObjCGCAttr();
+
+ if (VD->hasAttr<BlocksAttr>()) {
+ V = Builder.CreateStructGEP(V, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
+ VD->getNameAsString());
+ }
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+ LValue LV = LValue::MakeAddr(V, Quals);
+ LValue::SetObjCNonGC(LV, NonGCable);
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
+
+ // FIXME: the qualifier check does not seem sufficient here
+ if (E->getQualifier()) {
+ const FieldDecl *FD = cast<FieldDecl>(ND);
+ llvm::Value *V = CGM.EmitPointerToDataMember(FD);
+
+ return LValue::MakeAddr(V, MakeQualifiers(FD->getType()));
+ }
+
+ assert(false && "Unhandled DeclRefExpr");
+
+  // Return an invalid LValue; the assert above ensures that this point is
+  // never reached.
+ return LValue();
+}
+
+LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
+ return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
+}
+
+LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
+ // __extension__ doesn't affect lvalue-ness.
+ if (E->getOpcode() == UnaryOperator::Extension)
+ return EmitLValue(E->getSubExpr());
+
+ QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
+ switch (E->getOpcode()) {
+ default: assert(0 && "Unknown unary operator lvalue!");
+ case UnaryOperator::Deref: {
+ QualType T = E->getSubExpr()->getType()->getPointeeType();
+ assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+ Qualifiers Quals = MakeQualifiers(T);
+ Quals.setAddressSpace(ExprTy.getAddressSpace());
+
+ LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
+    // We should not generate a __weak write barrier on an indirect reference
+    // to a pointer to an object, as in: void foo (__weak id *param); *param = 0;
+    // But we do continue to generate a __strong write barrier on an indirect
+    // write into a pointer to an object.
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
+ LV.isObjCWeak())
+ LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ return LV;
+ }
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
+ return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
+ Idx, "idx"),
+ MakeQualifiers(ExprTy));
+ }
+ case UnaryOperator::PreInc:
+ case UnaryOperator::PreDec: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ bool isInc = E->getOpcode() == UnaryOperator::PreInc;
+
+ if (E->getType()->isAnyComplexType())
+ EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ else
+ EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ return LV;
+ }
+ }
+}
+
+LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
+ return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
+ Qualifiers());
+}
+
+LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
+ return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E),
+ Qualifiers());
+}
+
+
+LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
+ std::string GlobalVarName;
+
+ switch (Type) {
+ default: assert(0 && "Invalid type");
+ case PredefinedExpr::Func:
+ GlobalVarName = "__func__.";
+ break;
+ case PredefinedExpr::Function:
+ GlobalVarName = "__FUNCTION__.";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ GlobalVarName = "__PRETTY_FUNCTION__.";
+ break;
+ }
+
+ llvm::StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ GlobalVarName += FnName;
+
+ std::string FunctionName =
+ PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurCodeDecl);
+
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ return LValue::MakeAddr(C, Qualifiers());
+}
+
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+ switch (E->getIdentType()) {
+ default:
+ return EmitUnsupportedLValue(E, "predefined expression");
+ case PredefinedExpr::Func:
+ case PredefinedExpr::Function:
+ case PredefinedExpr::PrettyFunction:
+ return EmitPredefinedFunctionName(E->getIdentType());
+ }
+}
+
+llvm::BasicBlock *CodeGenFunction::getTrapBB() {
+ const CodeGenOptions &GCO = CGM.getCodeGenOpts();
+
+  // If we are not optimizing, don't collapse all calls to trap in the
+  // function into the same call; that way, in the debugger users can see
+  // which operation did in fact fail. If we are optimizing, we collapse all
+  // calls to trap down to just one per function to save on code size.
+  if (GCO.OptimizationLevel && TrapBB)
+    return TrapBB;
+
+ llvm::BasicBlock *Cont = 0;
+ if (HaveInsertPoint()) {
+ Cont = createBasicBlock("cont");
+ EmitBranch(Cont);
+ }
+ TrapBB = createBasicBlock("trap");
+ EmitBlock(TrapBB);
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
+ llvm::CallInst *TrapCall = Builder.CreateCall(F);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ if (Cont)
+ EmitBlock(Cont);
+ return TrapBB;
+}
+
+LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // The index must always be an integer, which is not an aggregate. Emit it.
+ llvm::Value *Idx = EmitScalarExpr(E->getIdx());
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerType();
+
+ // If the base is a vector type, then we are forming a vector element lvalue
+ // with this subscript.
+ if (E->getBase()->getType()->isVectorType()) {
+ // Emit the vector as an lvalue to get its address.
+ LValue LHS = EmitLValue(E->getBase());
+ assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
+ Idx = Builder.CreateIntCast(Idx,
+ llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx,
+ E->getBase()->getType().getCVRQualifiers());
+ }
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+
+ // Extend or truncate the index type to 32 or 64-bits.
+ unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (IdxBitwidth != LLVMPointerWidth)
+ Idx = Builder.CreateIntCast(Idx,
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth),
+ IdxSigned, "idxprom");
+
+  // FIXME: Once LLVM implements the object size checking, this can go away.
+ if (CatchUndefined) {
+ if (const ImplicitCastExpr *ICE=dyn_cast<ImplicitCastExpr>(E->getBase())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
+ if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) {
+ if (const ConstantArrayType *CAT
+ = getContext().getAsConstantArrayType(DRE->getType())) {
+ llvm::APInt Size = CAT->getSize();
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
+ llvm::ConstantInt::get(Idx->getType(), Size)),
+ Cont, getTrapBB());
+ EmitBlock(Cont);
+ }
+ }
+ }
+ }
+ }
+
+  // We know that the pointer points to a type of the correct size, unless the
+  // type is a VLA or an Objective-C interface.
+ llvm::Value *Address = 0;
+ if (const VariableArrayType *VAT =
+ getContext().getAsVariableArrayType(E->getType())) {
+ llvm::Value *VLASize = GetVLASize(VAT);
+
+ Idx = Builder.CreateMul(Idx, VLASize);
+
+ QualType BaseType = getContext().getBaseElementType(VAT);
+
+ CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
+ Idx = Builder.CreateUDiv(Idx,
+ llvm::ConstantInt::get(Idx->getType(),
+ BaseTypeSize.getQuantity()));
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ } else if (const ObjCObjectType *OIT =
+ E->getType()->getAs<ObjCObjectType>()) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ getContext().getTypeSizeInChars(OIT).getQuantity());
+
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
+ Idx, "arrayidx");
+ Address = Builder.CreateBitCast(Address, Base->getType());
+ } else {
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ }
+
+ QualType T = E->getBase()->getType()->getPointeeType();
+ assert(!T.isNull() &&
+ "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
+ Qualifiers Quals = MakeQualifiers(T);
+ Quals.setAddressSpace(E->getBase()->getType().getAddressSpace());
+
+ LValue LV = LValue::MakeAddr(Address, Quals);
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
+ LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ setObjCGCLValueClass(getContext(), E, LV);
+ }
+ return LV;
+}
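+
+// VLA indexing sketch for the branch above (illustrative):
+//
+//   void f(int n, int (*a)[n], int i) { a[i][0] = 0; }
+//
+// The index into 'a' is first scaled by the dynamic size from GetVLASize,
+// then divided by the base element size, so the GEP still advances in
+// units of the base element type.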
+
+static
+llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
+ llvm::SmallVector<unsigned, 4> &Elts) {
+ llvm::SmallVector<llvm::Constant*, 4> CElts;
+
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i)
+ CElts.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), Elts[i]));
+
+ return llvm::ConstantVector::get(&CElts[0], CElts.size());
+}
+
+LValue CodeGenFunction::
+EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+
+ // Emit the base vector as an l-value.
+ LValue Base;
+
+ // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+ if (E->isArrow()) {
+ // If it is a pointer to a vector, emit the address and form an lvalue with
+ // it.
+ llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+ const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
+ Qualifiers Quals = MakeQualifiers(PT->getPointeeType());
+ Quals.removeObjCGCAttr();
+ Base = LValue::MakeAddr(Ptr, Quals);
+ } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
+    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
+ // emit the base as an lvalue.
+ assert(E->getBase()->getType()->isVectorType());
+ Base = EmitLValue(E->getBase());
+ } else {
+    // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
+ assert(E->getBase()->getType()->getAs<VectorType>() &&
+ "Result must be a vector");
+ llvm::Value *Vec = EmitScalarExpr(E->getBase());
+
+ // Store the vector to memory (because LValue wants an address).
+ llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
+ Builder.CreateStore(Vec, VecMem);
+ Base = LValue::MakeAddr(VecMem, Qualifiers());
+ }
+
+ // Encode the element access list into a vector of unsigned indices.
+ llvm::SmallVector<unsigned, 4> Indices;
+ E->getEncodedElementAccess(Indices);
+
+ if (Base.isSimple()) {
+ llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV,
+ Base.getVRQualifiers());
+ }
+ assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
+ llvm::Constant *BaseElts = Base.getExtVectorElts();
+ llvm::SmallVector<llvm::Constant *, 4> CElts;
+
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
+ if (isa<llvm::ConstantAggregateZero>(BaseElts))
+ CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
+ else
+ CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
+ }
+ llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
+ Base.getVRQualifiers());
+}
+
+LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
+ bool isNonGC = false;
+ Expr *BaseExpr = E->getBase();
+ llvm::Value *BaseValue = NULL;
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy =
+ BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
+ isa<ObjCImplicitSetterGetterRefExpr>(
+ BaseExpr->IgnoreParens())) {
+ RValue RV = EmitObjCPropertyGet(BaseExpr);
+ BaseValue = RV.getAggregateAddr();
+ BaseQuals = BaseExpr->getType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ if (BaseLV.isNonGC())
+ isNonGC = true;
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ NamedDecl *ND = E->getMemberDecl();
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
+ LValue LV = EmitLValueForField(BaseValue, Field,
+ BaseQuals.getCVRQualifiers());
+ LValue::SetObjCNonGC(LV, isNonGC);
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
+
+ assert(false && "Unhandled member declaration!");
+ return LValue();
+}
+
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
+ return LValue::MakeBitfield(BaseValue, Info,
+ Field->getType().getCVRQualifiers()|CVRQualifiers);
+}
+
+/// EmitLValueForAnonRecordField - Given that the field is a member of
+/// an anonymous struct or union buried inside a record, and given
+/// that the base value is a pointer to the enclosing record, derive
+/// an lvalue for the ultimate field.
+LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
+ const FieldDecl *Field,
+ unsigned CVRQualifiers) {
+ llvm::SmallVector<const FieldDecl *, 8> Path;
+ Path.push_back(Field);
+
+ while (Field->getParent()->isAnonymousStructOrUnion()) {
+ const ValueDecl *VD = Field->getParent()->getAnonymousStructOrUnionObject();
+ if (!isa<FieldDecl>(VD)) break;
+ Field = cast<FieldDecl>(VD);
+ Path.push_back(Field);
+ }
+
+ llvm::SmallVectorImpl<const FieldDecl*>::reverse_iterator
+ I = Path.rbegin(), E = Path.rend();
+ while (true) {
+ LValue LV = EmitLValueForField(BaseValue, *I, CVRQualifiers);
+ if (++I == E) return LV;
+
+ assert(LV.isSimple());
+ BaseValue = LV.getAddress();
+ CVRQualifiers |= LV.getVRQualifiers();
+ }
+}
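+
+// Example of the walk above (illustrative):
+//
+//   struct S {
+//     struct { int x; };  // anonymous struct member
+//   };
+//
+// Given a pointer to S and the FieldDecl for 'x', the path is collected
+// from 'x' outward through the anonymous members, then replayed inward
+// with one EmitLValueForField per level.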
+
+LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers) {
+ if (Field->isBitField())
+ return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
+
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ unsigned idx = RL.getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
+
+ // Match union field type.
+ if (Field->getParent()->isUnion()) {
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(Field->getType());
+ const llvm::PointerType * BaseTy =
+ cast<llvm::PointerType>(BaseValue->getType());
+ unsigned AS = BaseTy->getAddressSpace();
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::get(FieldTy, AS),
+ "tmp");
+ }
+ if (Field->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+
+ Qualifiers Quals = MakeQualifiers(Field->getType());
+ Quals.addCVRQualifiers(CVRQualifiers);
+ // __weak attribute on a field is ignored.
+ if (Quals.getObjCGCAttr() == Qualifiers::Weak)
+ Quals.removeObjCGCAttr();
+
+ return LValue::MakeAddr(V, Quals);
+}
+
+LValue
+CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers) {
+ QualType FieldType = Field->getType();
+
+ if (!FieldType->isReferenceType())
+ return EmitLValueForField(BaseValue, Field, CVRQualifiers);
+
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ unsigned idx = RL.getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
+
+ assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+
+ return LValue::MakeAddr(V, MakeQualifiers(FieldType));
+}
+
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
+ llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
+ const Expr* InitExpr = E->getInitializer();
+ LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));
+
+ EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);
+
+ return Result;
+}
+
+LValue
+CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
+ if (E->isLvalue(getContext()) == Expr::LV_Valid) {
+ if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
+ Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
+ if (Live)
+ return EmitLValue(Live);
+ }
+
+ if (!E->getLHS())
+ return EmitUnsupportedLValue(E, "conditional operator with missing LHS");
+
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");
+
+ EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ // Any temporaries created here are conditional.
+ BeginConditionalBranch();
+ EmitBlock(LHSBlock);
+ LValue LHS = EmitLValue(E->getLHS());
+ EndConditionalBranch();
+
+ if (!LHS.isSimple())
+ return EmitUnsupportedLValue(E, "conditional operator");
+
+ // FIXME: We shouldn't need an alloca for this.
+ llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
+ Builder.CreateStore(LHS.getAddress(), Temp);
+ EmitBranch(ContBlock);
+
+ // Any temporaries created here are conditional.
+ BeginConditionalBranch();
+ EmitBlock(RHSBlock);
+ LValue RHS = EmitLValue(E->getRHS());
+ EndConditionalBranch();
+ if (!RHS.isSimple())
+ return EmitUnsupportedLValue(E, "conditional operator");
+
+ Builder.CreateStore(RHS.getAddress(), Temp);
+ EmitBranch(ContBlock);
+
+ EmitBlock(ContBlock);
+
+ Temp = Builder.CreateLoad(Temp, "lv");
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+ }
+
+  // Otherwise, the ?: expression here must have aggregate type.
+ assert((hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType()) &&
+ "Unexpected conditional operator!");
+
+ return EmitAggExprToLValue(E);
+}
+
+/// EmitCastLValue - Casts are never lvalues unless the cast is a
+/// dynamic_cast, in which case we can produce the usual lvalue result.
+/// Otherwise, if a cast is needed by the code generator in an lvalue
+/// context, it must mean that we need the address of an aggregate in order
+/// to access one of its fields. This can happen for all the reasons that
+/// casts are permitted with aggregate result, including noop aggregate
+/// casts and casts from scalar to union.
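+/// For example (illustrative), a GCC cast-to-union such as (union U)SomeInt
+/// used in an lvalue context needs the address of the resulting union
+/// temporary so that one of its members can be accessed.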
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return EmitUnsupportedLValue(E, "unexpected cast lvalue");
+
+ case CastExpr::CK_Dynamic: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *V = LV.getAddress();
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
+ return LValue::MakeAddr(EmitDynamicCast(V, DCE),
+ MakeQualifiers(E->getType()));
+ }
+
+ case CastExpr::CK_NoOp: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ if (LV.isPropertyRef()) {
+ QualType QT = E->getSubExpr()->getType();
+ RValue RV = EmitLoadOfPropertyRefLValue(LV, QT);
+ assert(!RV.isScalar() && "EmitCastLValue - scalar cast of property ref");
+ llvm::Value *V = RV.getAggregateAddr();
+ return LValue::MakeAddr(V, MakeQualifiers(QT));
+ }
+ return LV;
+ }
+ case CastExpr::CK_ConstructorConversion:
+ case CastExpr::CK_UserDefinedConversion:
+ case CastExpr::CK_AnyPointerToObjCPointerCast:
+ return EmitLValue(E->getSubExpr());
+
+ case CastExpr::CK_UncheckedDerivedToBase:
+ case CastExpr::CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getSubExpr()->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Perform the derived-to-base conversion
+ llvm::Value *Base =
+ GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl,
+ E->getBasePath(), /*NullCheckValue=*/false);
+
+ return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
+ }
+ case CastExpr::CK_ToUnion:
+ return EmitAggExprToLValue(E);
+ case CastExpr::CK_BaseToDerived: {
+ const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Perform the base-to-derived conversion
+ llvm::Value *Derived =
+ GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
+ E->getBasePath(),/*NullCheckValue=*/false);
+
+ return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
+ }
+ case CastExpr::CK_BitCast: {
+ // This must be a reinterpret_cast (or c-style equivalent).
+ const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
+
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(CE->getTypeAsWritten()));
+ return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+ }
+ }
+}
+
+LValue CodeGenFunction::EmitNullInitializationLValue(
+ const CXXZeroInitValueExpr *E) {
+ QualType Ty = E->getType();
+ LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
+ EmitNullInitialization(LV.getAddress(), Ty);
+ return LV;
+}
+
+//===--------------------------------------------------------------------===//
+// Expression Emission
+//===--------------------------------------------------------------------===//
+
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+  // Builtins never have block type, so it is safe to dispatch block calls
+  // first.
+ if (E->getCallee()->getType()->isBlockPointerType())
+ return EmitBlockCallExpr(E, ReturnValue);
+
+ if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
+ return EmitCXXMemberCallExpr(CE, ReturnValue);
+
+ const Decl *TargetDecl = 0;
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ TargetDecl = DRE->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
+ if (unsigned builtinID = FD->getBuiltinID())
+ return EmitBuiltinExpr(FD, builtinID, E);
+ }
+ }
+
+ if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
+
+ if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
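+    // For example (illustrative), given `typedef int I; int *p;` the call
+    // p->~I() merely evaluates p and has type void.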
+ EmitScalarExpr(E->getCallee());
+ return RValue::get(0);
+ }
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
+ E->arg_begin(), E->arg_end(), TargetDecl);
+}
+
+LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
+ // Comma expressions just emit their LHS then their RHS as an l-value.
+ if (E->getOpcode() == BinaryOperator::Comma) {
+ EmitAnyExpr(E->getLHS());
+ EnsureInsertPoint();
+ return EmitLValue(E->getRHS());
+ }
+
+ if (E->getOpcode() == BinaryOperator::PtrMemD ||
+ E->getOpcode() == BinaryOperator::PtrMemI)
+ return EmitPointerToDataMemberBinaryExpr(E);
+
+  // We can only produce an l-value for a binary operator expression that is
+  // a simple assignment.
+ if (E->getOpcode() != BinaryOperator::Assign)
+ return EmitUnsupportedLValue(E, "binary l-value expression");
+
+ if (!hasAggregateLLVMType(E->getType())) {
+ // Emit the LHS as an l-value.
+ LValue LV = EmitLValue(E->getLHS());
+
+ llvm::Value *RHS = EmitScalarExpr(E->getRHS());
+ EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(),
+ E->getType());
+ return LV;
+ }
+
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
+ RValue RV = EmitCallExpr(E);
+
+ if (!RV.isScalar())
+ return LValue::MakeAddr(RV.getAggregateAddr(),MakeQualifiers(E->getType()));
+
+ assert(E->getCallReturnType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
+}
+
+LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
+ // FIXME: This shouldn't require another copy.
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
+ llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp");
+ EmitCXXConstructExpr(Temp, E);
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+}
+
+LValue
+CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
+ llvm::Value *Temp = EmitCXXTypeidExpr(E);
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+}
+
+LValue
+CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ PushCXXTemporary(E->getTemporary(), LV.getAddress());
+ return LV;
+}
+
+LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
+ // Can only get l-value for message expression returning aggregate type
+ RValue RV = EmitObjCMessageExpr(E);
+ // FIXME: can this be volatile?
+ return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
+}
+
+LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
+ Ivar, CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
+ // FIXME: A lot of the code below could be shared with EmitMemberExpr.
+ llvm::Value *BaseValue = 0;
+ const Expr *BaseExpr = E->getBase();
+ Qualifiers BaseQuals;
+ QualType ObjectTy;
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ ObjectTy = BaseExpr->getType()->getPointeeType();
+ BaseQuals = ObjectTy.getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ ObjectTy = BaseExpr->getType();
+ BaseQuals = ObjectTy.getQualifiers();
+ }
+
+ LValue LV =
+ EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
+ BaseQuals.getCVRQualifiers());
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+}
+
+LValue
+CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or store
+ // through it.
+ return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
+}
+
+LValue CodeGenFunction::EmitObjCKVCRefLValue(
+ const ObjCImplicitSetterGetterRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or store
+ // through it.
+ return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
+}
+
+LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
+ return EmitUnsupportedLValue(E, "use of super");
+}
+
+LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
+  // Can only get l-value for a statement expression returning aggregate type.
+ RValue RV = EmitAnyExprToTemp(E);
+ return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
+}
+
+RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl) {
+ // Get the actual function type. The callee type will always be a pointer to
+ // function type or a block pointer type.
+ assert(CalleeType->isFunctionPointerType() &&
+ "Call must have function pointer type!");
+
+ CalleeType = getContext().getCanonicalType(CalleeType);
+
+ const FunctionType *FnType
+ = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+ QualType ResultType = FnType->getResultType();
+
+ CallArgList Args;
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
+
+ return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType),
+ Callee, ReturnValue, Args, TargetDecl);
+}
+
+LValue CodeGenFunction::
+EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
+ llvm::Value *BaseV;
+ if (E->getOpcode() == BinaryOperator::PtrMemI)
+ BaseV = EmitScalarExpr(E->getLHS());
+ else
+ BaseV = EmitLValue(E->getLHS()).getAddress();
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext());
+ BaseV = Builder.CreateBitCast(BaseV, i8Ty);
+ llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
+ llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr");
+
+ QualType Ty = E->getRHS()->getType();
+ Ty = Ty->getAs<MemberPointerType>()->getPointeeType();
+
+ const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty));
+ AddV = Builder.CreateBitCast(AddV, PType);
+ return LValue::MakeAddr(AddV, MakeQualifiers(Ty));
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
new file mode 100644
index 0000000..a4e64fb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -0,0 +1,871 @@
+//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Aggregate Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ llvm::Value *DestPtr;
+ bool VolatileDest;
+ bool IgnoreResult;
+ bool IsInitializer;
+ bool RequiresGCollection;
+
+ ReturnValueSlot getReturnValueSlot() const {
+ // If the destination slot requires garbage collection, we can't
+ // use the real return value slot, because we have to use the GC
+ // API.
+ if (RequiresGCollection) return ReturnValueSlot();
+
+ return ReturnValueSlot(DestPtr, VolatileDest);
+ }
+
+public:
+ AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
+ bool ignore, bool isinit, bool requiresGCollection)
+ : CGF(cgf), Builder(CGF.Builder),
+ DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore),
+ IsInitializer(isinit), RequiresGCollection(requiresGCollection) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ /// EmitAggLoadOfLValue - Given an expression with aggregate type that
+  /// represents an lvalue, this method emits the address of the lvalue,
+ /// then loads the result into DestPtr.
+ void EmitAggLoadOfLValue(const Expr *E);
+
+ /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+ void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
+ void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
+
+ void EmitGCMove(const Expr *E, RValue Src);
+
+ bool TypeRequiresGCollection(QualType T);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ void VisitStmt(Stmt *S) {
+ CGF.ErrorUnsupported(S, "aggregate expression");
+ }
+ void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+ void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+
+ // l-values.
+ void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
+ void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
+ void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
+ void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ // Operators.
+ void VisitCastExpr(CastExpr *E);
+ void VisitCallExpr(const CallExpr *E);
+ void VisitStmtExpr(const StmtExpr *E);
+ void VisitBinaryOperator(const BinaryOperator *BO);
+ void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
+ void VisitBinAssign(const BinaryOperator *E);
+ void VisitBinComma(const BinaryOperator *E);
+ void VisitUnaryAddrOf(const UnaryOperator *E);
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
+ void VisitObjCImplicitSetterGetterRefExpr(ObjCImplicitSetterGetterRefExpr *E);
+
+ void VisitConditionalOperator(const ConditionalOperator *CO);
+ void VisitChooseExpr(const ChooseExpr *CE);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ Visit(DAE->getExpr());
+ }
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+ void VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
+ void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
+
+ void VisitVAArgExpr(VAArgExpr *E);
+
+ void EmitInitializationToLValue(Expr *E, LValue Address, QualType T);
+ void EmitNullInitializationToLValue(LValue Address, QualType T);
+ // case Expr::ChooseExprClass:
+ void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitAggLoadOfLValue - Given an expression with aggregate type that
+/// represents an lvalue, this method emits the address of the lvalue,
+/// then loads the result into DestPtr.
+void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+/// \brief True if the given aggregate type requires special GC API calls.
+bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
+ // Only record types have members that might require garbage collection.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy) return false;
+
+ // Don't mess with non-trivial C++ types.
+ RecordDecl *Record = RecordTy->getDecl();
+ if (isa<CXXRecordDecl>(Record) &&
+ (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
+ !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
+ return false;
+
+ // Check whether the type has an object member.
+ return Record->hasObjectMember();
+}
+
+/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
+///
+/// The idea is that you do something like this:
+/// RValue Result = EmitSomething(..., getReturnValueSlot());
+/// EmitGCMove(E, Result);
+/// If GC doesn't interfere, this will cause the result to be emitted
+/// directly into the return value slot. If GC does interfere, a final
+/// move will be performed.
+void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
+ if (!RequiresGCollection) return;
+
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
+ Src.getAggregateAddr(),
+ E->getType());
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
+ assert(Src.isAggregate() && "value must be aggregate value!");
+
+ // If the result is ignored, don't copy from the value.
+ if (DestPtr == 0) {
+ if (!Src.isVolatileQualified() || (IgnoreResult && Ignore))
+ return;
+ // If the source is volatile, we must read from it; to do that, we need
+ // some place to put it.
+ DestPtr = CGF.CreateMemTemp(E->getType(), "agg.tmp");
+ }
+
+ if (RequiresGCollection) {
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
+ DestPtr, Src.getAggregateAddr(),
+ E->getType());
+ return;
+ }
+ // If the result of the assignment is used, copy the LHS there also.
+  // FIXME: Pass VolatileDest as well. I think we also need to merge volatile
+  // from the source, since we can't eliminate it if either operand is
+  // volatile, unless the copy is volatile for both source and destination.
+ CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
+ VolatileDest|Src.isVolatileQualified());
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
+ assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
+
+ EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
+ Src.isVolatileQualified()),
+ Ignore);
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+ if (!DestPtr && E->getCastKind() != CastExpr::CK_Dynamic) {
+ Visit(E->getSubExpr());
+ return;
+ }
+
+ switch (E->getCastKind()) {
+ default: assert(0 && "Unhandled cast kind!");
+
+ case CastExpr::CK_Dynamic: {
+ assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
+ LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
+ // FIXME: Do we also need to handle property references here?
+ if (LV.isSimple())
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
+ else
+ CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
+
+ if (DestPtr)
+ CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
+ break;
+ }
+
+ case CastExpr::CK_ToUnion: {
+ // GCC union extension
+ QualType PtrTy =
+ CGF.getContext().getPointerType(E->getSubExpr()->getType());
+ llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
+ CGF.ConvertType(PtrTy));
+ EmitInitializationToLValue(E->getSubExpr(),
+ LValue::MakeAddr(CastPtr, Qualifiers()),
+ E->getSubExpr()->getType());
+ break;
+ }
+
+ case CastExpr::CK_DerivedToBase:
+ case CastExpr::CK_BaseToDerived:
+ case CastExpr::CK_UncheckedDerivedToBase: {
+ assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
+ "should have been unpacked before we got here");
+ break;
+ }
+
+ // FIXME: Remove the CK_Unknown check here.
+ case CastExpr::CK_Unknown:
+ case CastExpr::CK_NoOp:
+ case CastExpr::CK_UserDefinedConversion:
+ case CastExpr::CK_ConstructorConversion:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+ E->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(E->getSubExpr());
+ break;
+
+ case CastExpr::CK_NullToMemberPointer: {
+ // If the subexpression's type is the C++0x nullptr_t, emit the
+ // subexpression, which may have side effects.
+ if (E->getSubExpr()->getType()->isNullPtrType())
+ Visit(E->getSubExpr());
+
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Value *NullValue = llvm::Constant::getNullValue(PtrDiffTy);
+ llvm::Value *Ptr = Builder.CreateStructGEP(DestPtr, 0, "ptr");
+ Builder.CreateStore(NullValue, Ptr, VolatileDest);
+
+ llvm::Value *Adj = Builder.CreateStructGEP(DestPtr, 1, "adj");
+ Builder.CreateStore(NullValue, Adj, VolatileDest);
+
+ break;
+ }
+
+ case CastExpr::CK_BitCast: {
+ // This must be a member function pointer cast.
+ Visit(E->getSubExpr());
+ break;
+ }
+
+ case CastExpr::CK_DerivedToBaseMemberPointer:
+ case CastExpr::CK_BaseToDerivedMemberPointer: {
+ QualType SrcType = E->getSubExpr()->getType();
+
+ llvm::Value *Src = CGF.CreateMemTemp(SrcType, "tmp");
+ CGF.EmitAggExpr(E->getSubExpr(), Src, SrcType.isVolatileQualified());
+
+ llvm::Value *SrcPtr = Builder.CreateStructGEP(Src, 0, "src.ptr");
+ SrcPtr = Builder.CreateLoad(SrcPtr);
+
+ llvm::Value *SrcAdj = Builder.CreateStructGEP(Src, 1, "src.adj");
+ SrcAdj = Builder.CreateLoad(SrcAdj);
+
+ llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
+ Builder.CreateStore(SrcPtr, DstPtr, VolatileDest);
+
+ llvm::Value *DstAdj = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
+
+    // Now see if we need to update the adjustment.
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(SrcType->getAs<MemberPointerType>()->
+ getClass()->getAs<RecordType>()->getDecl());
+ const CXXRecordDecl *DerivedDecl =
+ cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()->
+ getClass()->getAs<RecordType>()->getDecl());
+ if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
+ std::swap(DerivedDecl, BaseDecl);
+
+ if (llvm::Constant *Adj =
+ CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, E->getBasePath())) {
+ if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
+ SrcAdj = Builder.CreateSub(SrcAdj, Adj, "adj");
+ else
+ SrcAdj = Builder.CreateAdd(SrcAdj, Adj, "adj");
+ }
+
+ Builder.CreateStore(SrcAdj, DstAdj, VolatileDest);
+ break;
+ }
+ }
+}
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType()) {
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
+}
+
+void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
+}
+
+void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
+}
+
+void AggExprEmitter::VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
+ RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
+}
+
+void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitAnyExpr(E->getLHS(), 0, false, true);
+ CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest,
+ /*IgnoreResult=*/false, IsInitializer);
+}
+
+void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
+ // We have a member function pointer.
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ (void) MPT;
+ assert(MPT->getPointeeType()->isFunctionProtoType() &&
+ "Unexpected member pointer type!");
+
+ // The creation of member function pointers has no side effects; if
+ // there is no destination pointer, we have nothing to do.
+ if (!DestPtr)
+ return;
+
+ const DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
+ const CXXMethodDecl *MD =
+ cast<CXXMethodDecl>(DRE->getDecl())->getCanonicalDecl();
+
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
+ llvm::Value *FuncPtr;
+
+ if (MD->isVirtual()) {
+ int64_t Index = CGF.CGM.getVTables().getMethodVTableIndex(MD);
+
+ // FIXME: We shouldn't use / 8 here.
+ uint64_t PointerWidthInBytes =
+ CGF.CGM.getContext().Target.getPointerWidth(0) / 8;
+
+ // Itanium C++ ABI 2.3:
+ // For a non-virtual function, this field is a simple function pointer.
+ // For a virtual function, it is 1 plus the virtual table offset
+ // (in bytes) of the function, represented as a ptrdiff_t.
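+    // Illustrative example (not from the ABI text): with 8-byte pointers,
+    // a virtual function at vtable index 2 is encoded as 2 * 8 + 1 == 17.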
+ FuncPtr = llvm::ConstantInt::get(PtrDiffTy,
+ (Index * PointerWidthInBytes) + 1);
+ } else {
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGF.CGM.getTypes().GetFunctionType(CGF.CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Constant *Fn = CGF.CGM.GetAddrOfFunction(MD, Ty);
+ FuncPtr = llvm::ConstantExpr::getPtrToInt(Fn, PtrDiffTy);
+ }
+ Builder.CreateStore(FuncPtr, DstPtr, VolatileDest);
+
+ llvm::Value *AdjPtr = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
+
+ // The adjustment will always be 0.
+ Builder.CreateStore(llvm::ConstantInt::get(PtrDiffTy, 0), AdjPtr,
+ VolatileDest);
+}
+
+void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
+}
+
+void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() == BinaryOperator::PtrMemD ||
+ E->getOpcode() == BinaryOperator::PtrMemI)
+ VisitPointerToDataMemberBinaryOperator(E);
+ else
+ CGF.ErrorUnsupported(E, "aggregate binary expression");
+}
+
+void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
+ const BinaryOperator *E) {
+ LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType())
+ && "Invalid assignment");
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // We have to special case property setters, otherwise we must have
+ // a simple lvalue (no aggregates inside vectors, bitfields).
+ if (LHS.isPropertyRef()) {
+ llvm::Value *AggLoc = DestPtr;
+ if (!AggLoc)
+ AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
+ CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
+ RValue::getAggregate(AggLoc, VolatileDest));
+ } else if (LHS.isKVCRef()) {
+ llvm::Value *AggLoc = DestPtr;
+ if (!AggLoc)
+ AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
+ CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
+ RValue::getAggregate(AggLoc, VolatileDest));
+ } else {
+ bool RequiresGCollection = false;
+ if (CGF.getContext().getLangOptions().getGCMode())
+ RequiresGCollection = TypeRequiresGCollection(E->getLHS()->getType());
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
+ false, false, RequiresGCollection);
+ EmitFinalDestCopy(E, LHS, true);
+ }
+}
+
+void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
+ if (!E->getLHS()) {
+ CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
+ return;
+ }
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(LHSBlock);
+
+  // The GNU extension for a missing LHS was rejected above, so the LHS must
+  // be present here.
+  assert(E->getLHS() && "Must have LHS for aggregate value");
+
+ Visit(E->getLHS());
+ CGF.EndConditionalBranch();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(RHSBlock);
+
+ Visit(E->getRHS());
+ CGF.EndConditionalBranch();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+}
+
+void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
+ Visit(CE->getChosenSubExpr(CGF.getContext()));
+}
+
+void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
+ return;
+ }
+
+ EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, Qualifiers()));
+}
+
+void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateMemTemp(E->getType(), "tmp");
+
+ // FIXME: volatile
+ CGF.EmitAggExpr(E->getSubExpr(), Val, false);
+ } else
+ Visit(E->getSubExpr());
+
+ // Don't make this a live temporary if we're emitting an initializer expr.
+ if (!IsInitializer)
+ CGF.PushCXXTemporary(E->getTemporary(), Val);
+}
+
+void
+AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateMemTemp(E->getType(), "tmp");
+ }
+
+ if (E->requiresZeroInitialization())
+ EmitNullInitializationToLValue(LValue::MakeAddr(Val,
+ // FIXME: Qualifiers()?
+ E->getType().getQualifiers()),
+ E->getType());
+
+ CGF.EmitCXXConstructExpr(Val, E);
+}
+
+void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ llvm::Value *Val = DestPtr;
+
+ CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer);
+}
+
+void AggExprEmitter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateMemTemp(E->getType(), "tmp");
+ }
+ LValue LV = LValue::MakeAddr(Val, Qualifiers());
+ EmitNullInitializationToLValue(LV, E->getType());
+}
+
+void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateMemTemp(E->getType(), "tmp");
+ }
+ LValue LV = LValue::MakeAddr(Val, Qualifiers());
+ EmitNullInitializationToLValue(LV, E->getType());
+}
+
+void
+AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
+ // FIXME: Ignore result?
+ // FIXME: Are initializers affected by volatile?
+ if (isa<ImplicitValueInitExpr>(E)) {
+ EmitNullInitializationToLValue(LV, T);
+ } else if (T->isReferenceType()) {
+ RValue RV = CGF.EmitReferenceBindingToExpr(E, /*IsInitializer=*/false);
+ CGF.EmitStoreThroughLValue(RV, LV, T);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
+ } else if (CGF.hasAggregateLLVMType(T)) {
+ CGF.EmitAnyExpr(E, LV.getAddress(), false);
+ } else {
+ CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
+ }
+}
+
+void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
+ if (!CGF.hasAggregateLLVMType(T)) {
+ // For non-aggregates, we can store zero
+ llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
+ CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
+ } else {
+ // There's a potential optimization opportunity in combining
+ // memsets; that would be easy for arrays, but relatively
+ // difficult for structures with the current code.
+ CGF.EmitNullInitialization(LV.getAddress(), T);
+ }
+}
+
+void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+#if 0
+ // FIXME: Assess perf here? Figure out what cases are worth optimizing here
+ // (Length of globals? Chunks of zeroed-out space?).
+ //
+ // If we can, prefer a copy from a global; this is a lot less code for long
+ // globals, and it's easier for the current optimizers to analyze.
+ if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
+ llvm::GlobalVariable* GV =
+ new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage, C, "");
+ EmitFinalDestCopy(E, LValue::MakeAddr(GV, Qualifiers()));
+ return;
+ }
+#endif
+ if (E->hadArrayRangeDesignator()) {
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+ }
+
+ // Handle initialization of an array.
+ if (E->getType()->isArrayType()) {
+ const llvm::PointerType *APType =
+ cast<llvm::PointerType>(DestPtr->getType());
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(APType->getElementType());
+
+ uint64_t NumInitElements = E->getNumInits();
+
+ if (E->getNumInits() > 0) {
+ QualType T1 = E->getType();
+ QualType T2 = E->getInit(0)->getType();
+ if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
+ EmitAggLoadOfLValue(E->getInit(0));
+ return;
+ }
+ }
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
+ ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
+
+ // FIXME: were we intentionally ignoring address spaces and GC attributes?
+ Qualifiers Quals = CGF.MakeQualifiers(ElementType);
+
+ for (uint64_t i = 0; i != NumArrayElements; ++i) {
+ llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
+ if (i < NumInitElements)
+ EmitInitializationToLValue(E->getInit(i),
+ LValue::MakeAddr(NextVal, Quals),
+ ElementType);
+ else
+ EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, Quals),
+ ElementType);
+ }
+ return;
+ }
+
+ assert(E->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+  // to the appropriate value. This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned NumInitElements = E->getNumInits();
+ RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+ unsigned CurInitVal = 0;
+
+ if (E->getType()->isUnionType()) {
+ // Only initialize one field of a union. The field itself is
+ // specified by the initializer list.
+ if (!E->getInitializedFieldInUnion()) {
+ // Empty union; we have nothing to do.
+
+#ifndef NDEBUG
+      // Make sure that it's really an empty union and not a failure of
+      // semantic analysis.
+ for (RecordDecl::field_iterator Field = SD->field_begin(),
+ FieldEnd = SD->field_end();
+ Field != FieldEnd; ++Field)
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return;
+ }
+
+ // FIXME: volatility
+ FieldDecl *Field = E->getInitializedFieldInUnion();
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
+
+ if (NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
+ } else {
+ // Default-initialize to null
+ EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ }
+
+ return;
+ }
+
+ // If we're initializing the whole aggregate, just do it in place.
+ // FIXME: This is a hack around an AST bug (PR6537).
+ if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
+ EmitInitializationToLValue(E->getInit(0),
+ LValue::MakeAddr(DestPtr, Qualifiers()),
+ E->getType());
+ return;
+ }
+
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ for (RecordDecl::field_iterator Field = SD->field_begin(),
+ FieldEnd = SD->field_end();
+ Field != FieldEnd; ++Field) {
+ // We're done once we hit the flexible array member
+ if (Field->getType()->isIncompleteArrayType())
+ break;
+
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // FIXME: volatility
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
+    // We never generate write-barriers for initialized fields.
+ LValue::SetObjCNonGC(FieldLoc, true);
+ if (CurInitVal < NumInitElements) {
+ // Store the initializer into the field.
+ EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
+ Field->getType());
+ } else {
+      // We're out of initializers; default-initialize to null.
+ EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Points into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitAggExpr - Emit the computation of the specified expression of aggregate
+/// type. The result is computed into DestPtr. Note that if DestPtr is null,
+/// the value of the aggregate expression is not needed. If VolatileDest is
+/// true, DestPtr cannot be 0.
+//
+// FIXME: Take Qualifiers object.
+void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
+ bool VolatileDest, bool IgnoreResult,
+ bool IsInitializer,
+ bool RequiresGCollection) {
+ assert(E && hasAggregateLLVMType(E->getType()) &&
+ "Invalid aggregate expression to emit");
+  assert((DestPtr != 0 || VolatileDest == false) &&
+         "DestPtr cannot be null when VolatileDest is true");
+
+ AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult, IsInitializer,
+ RequiresGCollection)
+ .Visit(const_cast<Expr*>(E));
+}
+
+LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
+ assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
+ Qualifiers Q = MakeQualifiers(E->getType());
+ llvm::Value *Temp = CreateMemTemp(E->getType());
+ EmitAggExpr(E, Temp, Q.hasVolatile());
+ return LValue::MakeAddr(Temp, Q);
+}
+
+void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
+ llvm::Value *SrcPtr, QualType Ty,
+ bool isVolatile) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ if (getContext().getLangOptions().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ assert((Record->hasTrivialCopyConstructor() ||
+ Record->hasTrivialCopyAssignment()) &&
+ "Trying to aggregate-copy a type without a trivial copy "
+ "constructor or assignment operator");
+ // Ignore empty classes in C++.
+ if (Record->isEmpty())
+ return;
+ }
+ }
+
+ // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+ // C99 6.5.16.1p3, which states "If the value being stored in an object is
+  // read from another object that overlaps in any way the storage of the first
+ // object, then the overlap shall be exact and the two objects shall have
+ // qualified or unqualified versions of a compatible type."
+ //
+ // memcpy is not defined if the source and destination pointers are exactly
+ // equal, but other compilers do this optimization, and almost every memcpy
+ // implementation handles this case safely. If there is a libc that does not
+ // safely handle this, we can add a target hook.
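+  // Illustrative example: for `struct S a, b; a = b;` this lowers to a
+  // single llvm.memcpy of sizeof(struct S) bytes from &b to &a.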
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+
+ // Get size and alignment info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+ // FIXME: Handle variable sized types.
+ const llvm::Type *IntPtr =
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth);
+
+ // FIXME: If we have a volatile struct, the optimizer can remove what might
+ // appear to be `extra' memory ops:
+ //
+ // volatile struct { int i; } a, b;
+ //
+ // int main() {
+ // a = b;
+ // a = b;
+ // }
+ //
+ // we need to use a different call here. We use isVolatile to indicate when
+ // either the source or the destination is volatile.
+ const llvm::Type *I1Ty = llvm::Type::getInt1Ty(VMContext);
+ const llvm::Type *I8Ty = llvm::Type::getInt8Ty(VMContext);
+ const llvm::Type *I32Ty = llvm::Type::getInt32Ty(VMContext);
+
+ const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ const llvm::Type *DBP = llvm::PointerType::get(I8Ty, DPT->getAddressSpace());
+ if (DestPtr->getType() != DBP)
+ DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
+
+ const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ const llvm::Type *SBP = llvm::PointerType::get(I8Ty, SPT->getAddressSpace());
+ if (SrcPtr->getType() != SBP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
+
+ Builder.CreateCall5(CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(),
+ IntPtr),
+ DestPtr, SrcPtr,
+ // TypeInfo.first describes size in bits.
+ llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
+ llvm::ConstantInt::get(I32Ty, TypeInfo.second/8),
+ llvm::ConstantInt::get(I1Ty, isVolatile));
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
new file mode 100644
index 0000000..f93c79c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -0,0 +1,979 @@
+//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+using namespace clang;
+using namespace CodeGen;
+
+RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This),
+ MD->getThisType(getContext())));
+
+ // If there is a VTT parameter, emit it.
+ if (VTT) {
+ QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+ Args.push_back(std::make_pair(RValue::get(VTT), T));
+ }
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+
+ QualType ResultType = FPT->getResultType();
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
+ FPT->getExtInfo()),
+ Callee, ReturnValue, Args, MD);
+}
+
+/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
+/// expr can be devirtualized.
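+/// For example (illustrative), given `Derived D; D.VirtualMethod();` the
+/// call can be emitted directly, since the dynamic type of D is known to be
+/// Derived.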
+static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      // If the variable is of record type (not a pointer or reference), we
+      // know its dynamic type and can devirtualize calls on it.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXConstructExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
+
+RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
+ ReturnValueSlot ReturnValue) {
+ if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
+ return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
+
+ const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+ if (MD->isStatic()) {
+ // The method is static, emit it as we would a regular call.
+ llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), Callee,
+ ReturnValue, CE->arg_begin(), CE->arg_end());
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Value *This;
+
+ if (ME->isArrow())
+ This = EmitScalarExpr(ME->getBase());
+ else {
+ LValue BaseLV = EmitLValue(ME->getBase());
+ This = BaseLV.getAddress();
+ }
+
+ if (MD->isCopyAssignment() && MD->isTrivial()) {
+ // We don't like to generate the trivial copy assignment operator when
+ // it isn't necessary; just produce the proper effect here.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
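+  // For example (illustrative), Ptr->Base::Func() always calls Base::Func
+  // directly, even when Func is virtual.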
+ llvm::Value *Callee;
+ if (const CXXDestructorDecl *Destructor
+ = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (Destructor->isTrivial())
+ return RValue::get(0);
+ if (MD->isVirtual() && !ME->hasQualifier() &&
+ !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
+ Callee = BuildVirtualCall(Destructor, Dtor_Complete, This, Ty);
+ } else {
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty);
+ }
+ } else if (MD->isVirtual() && !ME->hasQualifier() &&
+ !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
+ Callee = BuildVirtualCall(MD, This, Ty);
+ } else {
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+ }
+
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ CE->arg_begin(), CE->arg_end());
+}
+
+RValue
+CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ const BinaryOperator *BO =
+ cast<BinaryOperator>(E->getCallee()->IgnoreParens());
+ const Expr *BaseExpr = BO->getLHS();
+ const Expr *MemFnExpr = BO->getRHS();
+
+ const MemberPointerType *MPT =
+ MemFnExpr->getType()->getAs<MemberPointerType>();
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr = CreateMemTemp(MemFnExpr->getType(), "mem.fn");
+ EmitAggExpr(MemFnExpr, MemFnPtr, /*VolatileDest=*/false);
+
+ // Emit the 'this' pointer.
+ llvm::Value *This;
+
+ if (BO->getOpcode() == BinaryOperator::PtrMemI)
+ This = EmitScalarExpr(BaseExpr);
+ else
+ This = EmitLValue(BaseExpr).getAddress();
+
+ // Adjust it.
+ llvm::Value *Adj = Builder.CreateStructGEP(MemFnPtr, 1);
+ Adj = Builder.CreateLoad(Adj, "mem.fn.adj");
+
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Int8PtrTy, "ptr");
+ Ptr = Builder.CreateGEP(Ptr, Adj, "adj");
+
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this");
+
+ llvm::Value *FnPtr = Builder.CreateStructGEP(MemFnPtr, 0, "mem.fn.ptr");
+
+ const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+
+ llvm::Value *FnAsInt = Builder.CreateLoad(FnPtr, "fn");
+
+ // If the LSB in the function pointer is 1, the function pointer points to
+ // a virtual function.
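+  // Illustrative example: an encoded value of 17 (vtable index 2 with
+  // 8-byte pointers) has LSB 1; the code below subtracts the 1 and loads
+  // the real function pointer from the vtable at byte offset 16.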
+ llvm::Value *IsVirtual
+ = Builder.CreateAnd(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1),
+ "and");
+
+ IsVirtual = Builder.CreateTrunc(IsVirtual,
+ llvm::Type::getInt1Ty(VMContext));
+
+ llvm::BasicBlock *FnVirtual = createBasicBlock("fn.virtual");
+ llvm::BasicBlock *FnNonVirtual = createBasicBlock("fn.nonvirtual");
+ llvm::BasicBlock *FnEnd = createBasicBlock("fn.end");
+
+ Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
+ EmitBlock(FnVirtual);
+
+ const llvm::Type *VTableTy =
+ FTy->getPointerTo()->getPointerTo();
+
+ llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
+ VTable = Builder.CreateLoad(VTable);
+
+ VTable = Builder.CreateBitCast(VTable, Int8PtrTy);
+ llvm::Value *VTableOffset =
+ Builder.CreateSub(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1));
+
+ VTable = Builder.CreateGEP(VTable, VTableOffset, "fn");
+ VTable = Builder.CreateBitCast(VTable, VTableTy);
+
+ llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "virtualfn");
+
+ EmitBranch(FnEnd);
+ EmitBlock(FnNonVirtual);
+
+ // If the function is not virtual, just load the pointer.
+ llvm::Value *NonVirtualFn = Builder.CreateLoad(FnPtr, "fn");
+ NonVirtualFn = Builder.CreateIntToPtr(NonVirtualFn, FTy->getPointerTo());
+
+ EmitBlock(FnEnd);
+
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
+ Callee->reserveOperandSpace(2);
+ Callee->addIncoming(VirtualFn, FnVirtual);
+ Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+
+ CallArgList Args;
+
+ QualType ThisType =
+ getContext().getPointerType(getContext().getTagDeclType(RD));
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This), ThisType));
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
+ const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
+ return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
+ ReturnValue, Args);
+}
+
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+ if (MD->isCopyAssignment()) {
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
+ if (ClassDecl->hasTrivialCopyAssignment()) {
+ assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
+ "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This;
+ if (LV.isPropertyRef()) {
+ llvm::Value *AggLoc = CreateMemTemp(E->getArg(1)->getType());
+ EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
+ EmitObjCPropertySet(LV.getPropertyRefExpr(),
+ RValue::getAggregate(AggLoc, false /*VolatileDest*/));
+ return RValue::getAggregate(0, false);
+    } else
+      This = LV.getAddress();
+
+ llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
+ QualType Ty = E->getType();
+ if (ClassDecl->hasObjectMember())
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, This, Src, Ty);
+ else
+ EmitAggregateCopy(This, Src, Ty);
+ return RValue::get(This);
+ }
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This;
+ if (LV.isPropertyRef()) {
+ RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getArg(0)->getType());
+ assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
+ This = RV.getAggregateAddr();
+  } else
+    This = LV.getAddress();
+
+ llvm::Value *Callee;
+ if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
+ Callee = BuildVirtualCall(MD, This, Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ E->arg_begin() + 1, E->arg_end());
+}
+
+void
+CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
+ const CXXConstructExpr *E) {
+ assert(Dest && "Must have a destination!");
+ const CXXConstructorDecl *CD = E->getConstructor();
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(E->getType());
+  // For a copy constructor, even if it is trivial, we must fall through so
+  // that its argument is code-gen'ed.
+ if (!CD->isCopyConstructor()) {
+ QualType InitType = E->getType();
+ if (Array)
+ InitType = getContext().getBaseElementType(Array);
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(InitType->getAs<RecordType>()->getDecl());
+ if (RD->hasTrivialConstructor())
+ return;
+ }
+  // As a code-gen optimization, elide the copy constructor and return its
+  // first argument instead, if in fact that argument is a temporary object.
+ if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
+ if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
+ EmitAggExpr(Arg, Dest, false);
+ return;
+ }
+ }
+ if (Array) {
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(Dest, BasePtr);
+
+ EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
+ E->arg_begin(), E->arg_end());
+  } else {
+ CXXCtorType Type =
+ (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
+ ? Ctor_Complete : Ctor_Base;
+ bool ForVirtualBase =
+ E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
+
+ // Call the constructor.
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
+ E->arg_begin(), E->arg_end());
+ }
+}
+
+static CharUnits CalculateCookiePadding(ASTContext &Ctx, QualType ElementType) {
+ const RecordType *RT = ElementType->getAs<RecordType>();
+ if (!RT)
+ return CharUnits::Zero();
+
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return CharUnits::Zero();
+
+ // Check if the class has a trivial destructor.
+ if (RD->hasTrivialDestructor()) {
+ // Check if the usual deallocation function takes two arguments.
+ const CXXMethodDecl *UsualDeallocationFunction = 0;
+
+ DeclarationName OpName =
+ Ctx.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
+ DeclContext::lookup_const_iterator Op, OpEnd;
+ for (llvm::tie(Op, OpEnd) = RD->lookup(OpName);
+ Op != OpEnd; ++Op) {
+ const CXXMethodDecl *Delete = cast<CXXMethodDecl>(*Op);
+
+ if (Delete->isUsualDeallocationFunction()) {
+ UsualDeallocationFunction = Delete;
+ break;
+ }
+ }
+
+ // No usual deallocation function, we don't need a cookie.
+ if (!UsualDeallocationFunction)
+ return CharUnits::Zero();
+
+ // The usual deallocation function doesn't take a size_t argument, so we
+ // don't need a cookie.
+ if (UsualDeallocationFunction->getNumParams() == 1)
+ return CharUnits::Zero();
+
+ assert(UsualDeallocationFunction->getNumParams() == 2 &&
+ "Unexpected deallocation function type!");
+ }
+
+ // Padding is the maximum of sizeof(size_t) and alignof(ElementType)
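+  // Illustrative example: on a target where sizeof(size_t) == 8, an element
+  // type with 16-byte alignment gets a 16-byte cookie, while a plain int
+  // gets an 8-byte cookie.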
+ return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+ Ctx.getTypeAlignInChars(ElementType));
+}
+
+static CharUnits CalculateCookiePadding(ASTContext &Ctx, const CXXNewExpr *E) {
+ if (!E->isArray())
+ return CharUnits::Zero();
+
+ // No cookie is required if the new operator being used is
+ // ::operator new[](size_t, void*).
+ const FunctionDecl *OperatorNew = E->getOperatorNew();
+ if (OperatorNew->getDeclContext()->getLookupContext()->isFileContext()) {
+ if (OperatorNew->getNumParams() == 2) {
+ CanQualType ParamType =
+ Ctx.getCanonicalType(OperatorNew->getParamDecl(1)->getType());
+
+ if (ParamType == Ctx.VoidPtrTy)
+ return CharUnits::Zero();
+ }
+ }
+
+ return CalculateCookiePadding(Ctx, E->getAllocatedType());
+}
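+
+// Per the check above, the library placement form needs no cookie, even
+// for arrays of classes with non-trivial destructors:
+//
+//   void *buf = ...;
+//   new (buf) T[n];   // calls ::operator new[](size_t, void*) -> no cookie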
+
+static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
+ CodeGenFunction &CGF,
+ const CXXNewExpr *E,
+ llvm::Value *& NumElements) {
+ QualType Type = E->getAllocatedType();
+ CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(Type);
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+
+ if (!E->isArray())
+ return llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
+
+ CharUnits CookiePadding = CalculateCookiePadding(CGF.getContext(), E);
+
+ Expr::EvalResult Result;
+ if (E->getArraySize()->Evaluate(Result, CGF.getContext()) &&
+ !Result.HasSideEffects && Result.Val.isInt()) {
+
+ CharUnits AllocSize =
+ Result.Val.getInt().getZExtValue() * TypeSize + CookiePadding;
+
+ NumElements =
+ llvm::ConstantInt::get(SizeTy, Result.Val.getInt().getZExtValue());
+ while (const ArrayType *AType = Context.getAsArrayType(Type)) {
+ const llvm::ArrayType *llvmAType =
+ cast<llvm::ArrayType>(CGF.ConvertType(Type));
+ NumElements =
+ CGF.Builder.CreateMul(NumElements,
+ llvm::ConstantInt::get(
+ SizeTy, llvmAType->getNumElements()));
+ Type = AType->getElementType();
+ }
+
+ return llvm::ConstantInt::get(SizeTy, AllocSize.getQuantity());
+ }
+
+ // Emit the array size expression.
+ NumElements = CGF.EmitScalarExpr(E->getArraySize());
+
+ // Multiply with the type size.
+ llvm::Value *V =
+ CGF.Builder.CreateMul(NumElements,
+ llvm::ConstantInt::get(SizeTy,
+ TypeSize.getQuantity()));
+
+ while (const ArrayType *AType = Context.getAsArrayType(Type)) {
+ const llvm::ArrayType *llvmAType =
+ cast<llvm::ArrayType>(CGF.ConvertType(Type));
+ NumElements =
+ CGF.Builder.CreateMul(NumElements,
+ llvm::ConstantInt::get(
+ SizeTy, llvmAType->getNumElements()));
+ Type = AType->getElementType();
+ }
+
+ // And add the cookie padding if necessary.
+ if (!CookiePadding.isZero())
+ V = CGF.Builder.CreateAdd(V,
+ llvm::ConstantInt::get(SizeTy, CookiePadding.getQuantity()));
+
+ return V;
+}
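+
+// Illustrative sketch of the two paths above, assuming sizeof(S) == 4 and
+// an 8-byte cookie:
+//
+//   new S[10];   // folds to the constant i64 48 (10 * 4 + 8)
+//   new S[n];    // emits a mul of %n by 4, then an add of 8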
+
+static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements) {
+ if (E->isArray()) {
+ if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ if (!Ctor->getParent()->hasTrivialConstructor())
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
+ E->constructor_arg_begin(),
+ E->constructor_arg_end());
+ return;
+ }
+ }
+
+ QualType AllocType = E->getAllocatedType();
+
+ if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
+ NewPtr, E->constructor_arg_begin(),
+ E->constructor_arg_end());
+
+ return;
+ }
+
+ // We have a POD type.
+ if (E->getNumConstructorArgs() == 0)
+ return;
+
+ assert(E->getNumConstructorArgs() == 1 &&
+ "Can only have one argument to initializer of POD type.");
+
+ const Expr *Init = E->getConstructorArg(0);
+
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
+ AllocType.isVolatileQualified(), AllocType);
+ else if (AllocType->isAnyComplexType())
+ CGF.EmitComplexExprIntoAddr(Init, NewPtr,
+ AllocType.isVolatileQualified());
+ else
+ CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+}
+
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+ QualType AllocType = E->getAllocatedType();
+ FunctionDecl *NewFD = E->getOperatorNew();
+ const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList NewArgs;
+
+ // The allocation size is the first argument.
+ QualType SizeTy = getContext().getSizeType();
+
+ llvm::Value *NumElements = 0;
+ llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
+ *this, E, NumElements);
+
+ NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
+
+ // Emit the rest of the arguments.
+ // FIXME: Ideally, this should just use EmitCallArgs.
+ CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
+
+ // First, use the types from the function type.
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
+ QualType ArgType = NewFTy->getArgType(i);
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
+ NewArg != NewArgEnd; ++NewArg) {
+ QualType ArgType = NewArg->getType();
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+ }
+
+ // Emit the call to new.
+ RValue RV =
+ EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
+ CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
+
+ // If an allocation function is declared with an empty exception
+ // specification, it returns null to indicate failure to allocate storage.
+ // [expr.new]p13.
+ // (We don't need to check for null when there's no new initializer and
+ // we're allocating a POD type).
+ bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
+ !(AllocType->isPODType() && !E->hasInitializer());
+
+ llvm::BasicBlock *NewNull = 0;
+ llvm::BasicBlock *NewNotNull = 0;
+ llvm::BasicBlock *NewEnd = 0;
+
+ llvm::Value *NewPtr = RV.getScalarVal();
+
+ if (NullCheckResult) {
+ NewNull = createBasicBlock("new.null");
+ NewNotNull = createBasicBlock("new.notnull");
+ NewEnd = createBasicBlock("new.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(NewPtr,
+ llvm::Constant::getNullValue(NewPtr->getType()),
+ "isnull");
+
+ Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
+ EmitBlock(NewNotNull);
+ }
+
+ CharUnits CookiePadding = CalculateCookiePadding(getContext(), E);
+ if (!CookiePadding.isZero()) {
+ CharUnits CookieOffset =
+ CookiePadding - getContext().getTypeSizeInChars(SizeTy);
+
+ llvm::Value *NumElementsPtr =
+ Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset.getQuantity());
+
+ NumElementsPtr = Builder.CreateBitCast(NumElementsPtr,
+ ConvertType(SizeTy)->getPointerTo());
+ Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Now add the padding to the new ptr.
+ NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookiePadding.getQuantity());
+ }
+
+ if (AllocType->isArrayType()) {
+ while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
+ AllocType = AType->getElementType();
+ NewPtr =
+ Builder.CreateBitCast(NewPtr,
+ ConvertType(getContext().getPointerType(AllocType)));
+ EmitNewInitializer(*this, E, NewPtr, NumElements);
+ NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
+ }
+ else {
+ NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
+ EmitNewInitializer(*this, E, NewPtr, NumElements);
+ }
+
+ if (NullCheckResult) {
+ Builder.CreateBr(NewEnd);
+ NewNotNull = Builder.GetInsertBlock();
+ EmitBlock(NewNull);
+ Builder.CreateBr(NewEnd);
+ EmitBlock(NewEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(NewPtr, NewNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);
+
+ NewPtr = PHI;
+ }
+
+ return NewPtr;
+}
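+
+// Illustrative shape of the IR emitted above for `new (std::nothrow) S[n]`
+// (the nothrow allocator is declared throw(), so the null check fires;
+// the callee name below is schematic, not the real mangled symbol):
+//
+//   %mem = call i8* @operator.new.array(i64 %size)
+//   %isnull = icmp eq i8* %mem, null
+//   br i1 %isnull, label %new.null, label %new.notnull
+//   ; new.notnull: store the cookie, advance past it, run constructors
+//   ; new.end: phi of the adjusted pointer and null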
+
+static std::pair<llvm::Value *, llvm::Value *>
+GetAllocatedObjectPtrAndNumElements(CodeGenFunction &CGF,
+ llvm::Value *Ptr, QualType DeleteTy) {
+ QualType SizeTy = CGF.getContext().getSizeType();
+ const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ CharUnits DeleteTypeAlign = CGF.getContext().getTypeAlignInChars(DeleteTy);
+ CharUnits CookiePadding =
+ std::max(CGF.getContext().getTypeSizeInChars(SizeTy),
+ DeleteTypeAlign);
+ assert(!CookiePadding.isZero() && "CookiePadding should not be 0.");
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ CharUnits CookieOffset =
+ CookiePadding - CGF.getContext().getTypeSizeInChars(SizeTy);
+
+ llvm::Value *AllocatedObjectPtr = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+ AllocatedObjectPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(AllocatedObjectPtr,
+ -CookiePadding.getQuantity());
+
+ llvm::Value *NumElementsPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(AllocatedObjectPtr,
+ CookieOffset.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo());
+
+ llvm::Value *NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+ NumElements =
+ CGF.Builder.CreateIntCast(NumElements, SizeLTy, /*isSigned=*/false);
+
+ return std::make_pair(AllocatedObjectPtr, NumElements);
+}
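+
+// Memory layout recovered above (illustrative; P is the cookie padding and
+// sizeof(size_t) == 8):
+//
+//   block start              first element (the Ptr we are handed)
+//   |                        |
+//   v                        v
+//   [ pad: P - 8 ][ count: 8 ][ elem 0 ][ elem 1 ] ...
+//
+// The count is therefore always the size_t immediately before element 0.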
+
+void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
+ llvm::Value *Ptr,
+ QualType DeleteTy) {
+ const FunctionProtoType *DeleteFTy =
+ DeleteFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList DeleteArgs;
+
+ // Check if we need to pass the size to the delete operator.
+ llvm::Value *Size = 0;
+ QualType SizeTy;
+ if (DeleteFTy->getNumArgs() == 2) {
+ SizeTy = DeleteFTy->getArgType(1);
+ CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+ Size = llvm::ConstantInt::get(ConvertType(SizeTy),
+ DeleteTypeSize.getQuantity());
+ }
+
+ if (DeleteFD->getOverloadedOperator() == OO_Array_Delete &&
+ !CalculateCookiePadding(getContext(), DeleteTy).isZero()) {
+ // We need to get the number of elements in the array from the cookie.
+ llvm::Value *AllocatedObjectPtr;
+ llvm::Value *NumElements;
+ llvm::tie(AllocatedObjectPtr, NumElements) =
+ GetAllocatedObjectPtrAndNumElements(*this, Ptr, DeleteTy);
+
+ // Multiply the size with the number of elements.
+ if (Size)
+ Size = Builder.CreateMul(NumElements, Size);
+
+ Ptr = AllocatedObjectPtr;
+ }
+
+ QualType ArgTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
+ DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
+
+ if (Size)
+ DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
+
+ // Emit the call to delete.
+ EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
+ CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
+ DeleteArgs, DeleteFD);
+}
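+
+// Illustrative sketch: given a class with a two-argument usual
+// deallocation function (hypothetical declaration),
+//
+//   struct S { ~S(); void operator delete[](void *, size_t); };
+//
+// `delete[] p` per the code above ends up calling
+// S::operator delete[](block, n * sizeof(S)), where `block` and `n` are
+// recovered from the array cookie.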
+
+void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
+
+ // Get at the argument as it was before the implicit conversion to
+ // void*.
+ const Expr *Arg = E->getArgument();
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ if (ICE->getCastKind() != CastExpr::CK_UserDefinedConversion &&
+ ICE->getType()->isVoidPointerType())
+ Arg = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
+
+ llvm::Value *Ptr = EmitScalarExpr(Arg);
+
+ // Null check the pointer.
+ llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
+ llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
+ "isnull");
+
+ Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
+ EmitBlock(DeleteNotNull);
+
+ bool ShouldCallDelete = true;
+
+ // Call the destructor if necessary.
+ if (const RecordType *RT = DeleteTy->getAs<RecordType>()) {
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!RD->hasTrivialDestructor()) {
+ const CXXDestructorDecl *Dtor = RD->getDestructor(getContext());
+ if (E->isArrayForm()) {
+ llvm::Value *AllocatedObjectPtr;
+ llvm::Value *NumElements;
+ llvm::tie(AllocatedObjectPtr, NumElements) =
+ GetAllocatedObjectPtrAndNumElements(*this, Ptr, DeleteTy);
+
+ EmitCXXAggrDestructorCall(Dtor, NumElements, Ptr);
+ } else if (Dtor->isVirtual()) {
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(Dtor),
+ /*isVariadic=*/false);
+
+ llvm::Value *Callee = BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
+ EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
+ 0, 0);
+
+ // The dtor took care of deleting the object.
+ ShouldCallDelete = false;
+ } else
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ Ptr);
+ }
+ }
+ }
+
+ if (ShouldCallDelete)
+ EmitDeleteCall(E->getOperatorDelete(), Ptr, DeleteTy);
+
+ EmitBlock(DeleteEnd);
+}
+
+llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ QualType Ty = E->getType();
+ const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
+
+ if (E->isTypeOperand()) {
+ llvm::Constant *TypeInfo =
+ CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
+ return Builder.CreateBitCast(TypeInfo, LTy);
+ }
+
+ Expr *subE = E->getExprOperand();
+ Ty = subE->getType();
+ CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
+ Ty = CanTy.getUnqualifiedType().getNonReferenceType();
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->isPolymorphic()) {
+ // FIXME: if subE is an lvalue do
+ LValue Obj = EmitLValue(subE);
+ llvm::Value *This = Obj.getAddress();
+ LTy = LTy->getPointerTo()->getPointerTo();
+ llvm::Value *V = Builder.CreateBitCast(This, LTy);
+ // We need to do a zero check for *p, unless it has NonNullAttr.
+ // FIXME: PointerType->hasAttr<NonNullAttr>()
+ bool CanBeZero = false;
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
+ if (UO->getOpcode() == UnaryOperator::Deref)
+ CanBeZero = true;
+ if (CanBeZero) {
+ llvm::BasicBlock *NonZeroBlock = createBasicBlock();
+ llvm::BasicBlock *ZeroBlock = createBasicBlock();
+
+ llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
+ Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
+ NonZeroBlock, ZeroBlock);
+ EmitBlock(ZeroBlock);
+ /// Call __cxa_bad_typeid
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ const llvm::FunctionType *FTy;
+ FTy = llvm::FunctionType::get(ResultType, false);
+ llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
+ Builder.CreateCall(F)->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ EmitBlock(NonZeroBlock);
+ }
+ V = Builder.CreateLoad(V, "vtable");
+ V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
+ V = Builder.CreateLoad(V);
+ return V;
+ }
+ }
+ return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
+}
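+
+// Illustrative sketch of the polymorphic path above: for
+//
+//   Base *p; ... typeid(*p) ...
+//
+// the operand is a dereference and may be null, so we branch to a block
+// that calls __cxa_bad_typeid() when p == 0; otherwise we load the vtable
+// pointer and read the std::type_info* stored at vtable slot -1, per the
+// Itanium C++ ABI layout this code targets.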
+
+llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
+ const CXXDynamicCastExpr *DCE) {
+ QualType SrcTy = DCE->getSubExpr()->getType();
+ QualType DestTy = DCE->getTypeAsWritten();
+ QualType InnerType = DestTy->getPointeeType();
+
+ const llvm::Type *LTy = ConvertType(DCE->getType());
+
+ bool CanBeZero = false;
+ bool ToVoid = false;
+ bool ThrowOnBad = false;
+ if (DestTy->isPointerType()) {
+ // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
+ CanBeZero = true;
+ if (InnerType->isVoidType())
+ ToVoid = true;
+ } else {
+ LTy = LTy->getPointerTo();
+
+ // FIXME: What if exceptions are disabled?
+ ThrowOnBad = true;
+ }
+
+ if (SrcTy->isPointerType() || SrcTy->isReferenceType())
+ SrcTy = SrcTy->getPointeeType();
+ SrcTy = SrcTy.getUnqualifiedType();
+
+ if (DestTy->isPointerType() || DestTy->isReferenceType())
+ DestTy = DestTy->getPointeeType();
+ DestTy = DestTy.getUnqualifiedType();
+
+ llvm::BasicBlock *ContBlock = createBasicBlock();
+ llvm::BasicBlock *NullBlock = 0;
+ llvm::BasicBlock *NonZeroBlock = 0;
+ if (CanBeZero) {
+ NonZeroBlock = createBasicBlock();
+ NullBlock = createBasicBlock();
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
+ EmitBlock(NonZeroBlock);
+ }
+
+ llvm::BasicBlock *BadCastBlock = 0;
+
+ const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+
+ // See if this is a dynamic_cast<void*>.
+ if (ToVoid) {
+ llvm::Value *This = V;
+ V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
+ V = Builder.CreateLoad(V, "vtable");
+ V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
+ V = Builder.CreateLoad(V, "offset to top");
+ This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
+ V = Builder.CreateInBoundsGEP(This, V);
+ V = Builder.CreateBitCast(V, LTy);
+ } else {
+ /// Call __dynamic_cast
+ const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *PtrToInt8Ty
+ = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrDiffTy);
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+
+ // FIXME: Calculate better hint.
+ llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);
+
+ assert(SrcTy->isRecordType() && "Src type must be record type!");
+ assert(DestTy->isRecordType() && "Dest type must be record type!");
+
+ llvm::Value *SrcArg
+ = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
+ llvm::Value *DestArg
+ = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
+
+ V = Builder.CreateBitCast(V, PtrToInt8Ty);
+ V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
+ V, SrcArg, DestArg, hint);
+ V = Builder.CreateBitCast(V, LTy);
+
+ if (ThrowOnBad) {
+ BadCastBlock = createBasicBlock();
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
+ EmitBlock(BadCastBlock);
+ /// Invoke __cxa_bad_cast
+ ResultType = llvm::Type::getVoidTy(VMContext);
+ const llvm::FunctionType *FBadTy;
+ FBadTy = llvm::FunctionType::get(ResultType, false);
+ llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
+ if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
+ EmitBlock(Cont);
+ } else {
+ // FIXME: Does this ever make sense?
+ Builder.CreateCall(F)->setDoesNotReturn();
+ }
+ Builder.CreateUnreachable();
+ }
+ }
+
+ if (CanBeZero) {
+ Builder.CreateBr(ContBlock);
+ EmitBlock(NullBlock);
+ Builder.CreateBr(ContBlock);
+ }
+ EmitBlock(ContBlock);
+ if (CanBeZero) {
+ llvm::PHINode *PHI = Builder.CreatePHI(LTy);
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(V, NonZeroBlock);
+ PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
+ V = PHI;
+ }
+
+ return V;
+}
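+
+// Illustrative summary of the paths above (Itanium C++ ABI layout):
+//
+//   dynamic_cast<void*>(p)    // read offset-to-top at vtable slot -2 and
+//                             // adjust p; no runtime call is needed
+//   dynamic_cast<Derived*>(p) // call __dynamic_cast(p, src, dst, hint);
+//                             // a null result yields a null pointer
+//   dynamic_cast<Derived&>(r) // same call, but a null result branches to
+//                             // a block that calls __cxa_bad_cast()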
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 0000000..0a0c914
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,742 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
+
+namespace {
+class ComplexExprEmitter
+ : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ // True if we should ignore the value of a
+ bool IgnoreReal;
+ bool IgnoreImag;
+ // True if we should ignore the value of a=b
+ bool IgnoreRealAssign;
+ bool IgnoreImagAssign;
+public:
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
+ bool irn=false, bool iin=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
+ IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreReal() {
+ bool I = IgnoreReal;
+ IgnoreReal = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImag() {
+ bool I = IgnoreImag;
+ IgnoreImag = false;
+ return I;
+ }
+ bool TestAndClearIgnoreRealAssign() {
+ bool I = IgnoreRealAssign;
+ IgnoreRealAssign = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImagAssign() {
+ bool I = IgnoreImagAssign;
+ IgnoreImagAssign = false;
+ return I;
+ }
+
+ /// EmitLoadOfLValue - Given an expression with complex type that
+ /// represents an l-value, this method emits the address of the l-value,
+ /// then loads and returns the result.
+ ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ if (LV.isSimple())
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+
+ if (LV.isPropertyRef())
+ return CGF.EmitObjCPropertyGet(LV.getPropertyRefExpr()).getComplexVal();
+
+ assert(LV.isKVCRef() && "Unknown LValue type!");
+ return CGF.EmitObjCPropertyGet(LV.getKVCRefExpr()).getComplexVal();
+ }
+
+ /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+ /// the real and imaginary pieces.
+ ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+
+ /// EmitStoreOfComplex - Store the specified real/imag parts into the
+ /// specified value pointer.
+ void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
+
+ /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+ QualType DestType);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ ComplexPairTy VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ assert(0 && "Stmt can't have complex result type!");
+ return ComplexPairTy();
+ }
+ ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+ ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+
+ // l-values.
+ ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getComplexVal();
+ }
+ ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // FIXME: CompoundLiteralExpr
+
+ ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
+ ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ // Unlike for scalars, we don't have to worry about function->ptr demotion
+ // here.
+ return EmitCast(E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCastExpr(CastExpr *E) {
+ return EmitCast(E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCallExpr(const CallExpr *E);
+ ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+ // Operators.
+ ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
+ }
+ ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
+ ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
+ // LNot,Real,Imag never return complex.
+ ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
+ }
+ ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+ ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null =
+ llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+
+ struct BinOpInfo {
+ ComplexPairTy LHS;
+ ComplexPairTy RHS;
+ QualType Ty; // Computation Type.
+ };
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &));
+
+ ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+ ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+ ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+ ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+ return EmitBinAdd(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+ return EmitBinSub(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+ return EmitBinDiv(EmitBinOps(E));
+ }
+
+ // Compound assignments.
+ ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+ }
+ ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+ }
+ ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+ }
+ ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+ }
+
+ // GCC rejects rem/and/or/xor for integer complex.
+ // Logical and/or always return int, never complex.
+
+ // No comparisons produce a complex result.
+ ComplexPairTy VisitBinAssign (const BinaryOperator *E);
+ ComplexPairTy VisitBinComma (const BinaryOperator *E);
+
+
+ ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
+ ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+ ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+ ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to
+/// load the real and imaginary pieces, returning them as Real/Imag.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+ bool isVolatile) {
+ llvm::Value *Real=0, *Imag=0;
+
+ if (!IgnoreReal) {
+ llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
+ SrcPtr->getName() + ".realp");
+ Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
+ }
+
+ if (!IgnoreImag) {
+ llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
+ SrcPtr->getName() + ".imagp");
+ Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
+ }
+ return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+ bool isVolatile) {
+ llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
+
+ Builder.CreateStore(Val.first, RealPtr, isVolatile);
+ Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
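+
+// Note: a _Complex T value lowers to the LLVM aggregate { T, T }, so the
+// two struct GEPs above address the real part (index 0) and the imaginary
+// part (index 1) of that pair.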
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "complex expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+ return
+ ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+ QualType SrcType,
+ QualType DestType) {
+ // Get the src/dest element type.
+ SrcType = SrcType->getAs<ComplexType>()->getElementType();
+ DestType = DestType->getAs<ComplexType>()->getElementType();
+
+ // C99 6.3.1.6: When a value of complex type is converted to another
+ // complex type, both the real and imaginary parts follow the conversion
+ // rules for the corresponding real types.
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+ return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
+ // Two cases here: cast from (complex to complex) and (scalar to complex).
+ if (Op->getType()->isAnyComplexType())
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+
+ // C99 6.3.1.7: When a value of real type is converted to a complex type, the
+ // real part of the complex result value is determined by the rules of
+ // conversion to the corresponding real type and the imaginary part of the
+ // complex result value is a positive zero or an unsigned zero.
+ llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+ // Convert the input element to the element type of the complex.
+ DestTy = DestTy->getAs<ComplexType>()->getElementType();
+ Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+}
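+
+// Illustrative example of the scalar-to-complex path above (C99 6.3.1.7):
+//
+//   double d = 2.0;
+//   _Complex double z = d;   // emitted as the pair (2.0, +0.0)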
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ ComplexPairTy Op = Visit(E->getSubExpr());
+
+ llvm::Value *ResR, *ResI;
+ if (Op.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFNeg(Op.first, "neg.r");
+ ResI = Builder.CreateFNeg(Op.second, "neg.i");
+ } else {
+ ResR = Builder.CreateNeg(Op.first, "neg.r");
+ ResI = Builder.CreateNeg(Op.second, "neg.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ // ~(a+ib) = a + i*-b
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResI;
+ if (Op.second->getType()->isFloatingPointTy())
+ ResI = Builder.CreateFNeg(Op.second, "conj.i");
+ else
+ ResI = Builder.CreateNeg(Op.second, "conj.i");
+
+ return ComplexPairTy(Op.first, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ } else {
+ ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ } else {
+ ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
+ using llvm::Value;
+ Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+ ResR = Builder.CreateFSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateFAdd(ResIl, ResIr, "mul.i");
+ } else {
+ Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+ ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
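+
+// The code above computes (a+ib)*(c+id) = (ac - bd) + i(bc + ad).
+// Worked example: (1+2i)*(3+4i) = (3-8) + i(6+4) = -5 + 10i.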
+
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+ llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+ llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+ llvm::Value *DSTr, *DSTi;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr, "tmp"); // a*c
+ llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi, "tmp"); // b*d
+ llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr, "tmp"); // c*c
+ llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi, "tmp"); // d*d
+ llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr, "tmp"); // b*c
+ llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi, "tmp"); // a*d
+ llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp");
+ } else {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c
+ llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d
+ llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c
+ llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d
+ llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c
+ llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d
+ llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+ if (Op.Ty->getAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
+ DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp");
+ } else {
+ DSTr = Builder.CreateSDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp");
+ }
+ }
+
+ return ComplexPairTy(DSTr, DSTi);
+}
+
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ BinOpInfo Ops;
+ Ops.LHS = Visit(E->getLHS());
+ Ops.RHS = Visit(E->getRHS());
+ Ops.Ty = E->getType();
+ return Ops;
+}
+
+
+// Compound assignments.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ bool ignreal = TestAndClearIgnoreRealAssign();
+ bool ignimag = TestAndClearIgnoreImagAssign();
+ QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+
+ BinOpInfo OpInfo;
+
+ // Load the RHS and LHS operands.
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen a little. It is possible for the RHS to be complex or
+ // scalar.
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
+
+ LValue LHSLV = CGF.EmitLValue(E->getLHS());
+ // We know the LHS is a complex lvalue.
+ ComplexPairTy LHSComplexPair;
+ if (LHSLV.isPropertyRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal();
+ else if (LHSLV.isKVCRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal();
+ else
+ LHSComplexPair = EmitLoadOfComplex(LHSLV.getAddress(),
+ LHSLV.isVolatileQualified());
+
+ OpInfo.LHS=EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
+
+ // Expand the binary operator.
+ ComplexPairTy Result = (this->*Func)(OpInfo);
+
+ // Truncate the result back to the LHS type.
+ Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+
+ // Store the result value into the LHS lvalue.
+ if (LHSLV.isPropertyRef())
+ CGF.EmitObjCPropertySet(LHSLV.getPropertyRefExpr(),
+ RValue::getComplex(Result));
+ else if (LHSLV.isKVCRef())
+ CGF.EmitObjCPropertySet(LHSLV.getKVCRefExpr(), RValue::getComplex(Result));
+ else
+ EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
+ // And now return the LHS
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+ if (LHSLV.isPropertyRef())
+ return CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal();
+ else if (LHSLV.isKVCRef())
+ return CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal();
+ return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
+}
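+
+// Illustrative sketch of the widening performed above:
+//
+//   _Complex float f; _Complex double d;
+//   f += d;   // f is widened to _Complex double, the addition is done in
+//             // double, and the result is truncated back to _Complex float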
+
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ bool ignreal = TestAndClearIgnoreRealAssign();
+ bool ignimag = TestAndClearIgnoreImagAssign();
+ assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) ==
+ CGF.getContext().getCanonicalType(E->getRHS()->getType()) &&
+ "Invalid assignment");
+ // Emit the RHS.
+ ComplexPairTy Val = Visit(E->getRHS());
+
+ // Compute the address to store into.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Store into it, if simple.
+ if (LHS.isSimple()) {
+ EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
+
+ // And now return the LHS
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+ }
+
+ // Otherwise we must have a property setter (no complex vector/bitfields).
+ if (LHS.isPropertyRef())
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val));
+ else
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Val));
+
+ // There is no reload after a store through a method, but we need to restore
+ // the Ignore* flags.
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+ return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitStmt(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+ if (!E->getLHS()) {
+ CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+
+ // The GNU ?: extension with a missing LHS was already rejected above.
+ assert(E->getLHS() && "Must have LHS for complex value");
+
+ ComplexPairTy LHS = Visit(E->getLHS());
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+
+ ComplexPairTy RHS = Visit(E->getRHS());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
+ RealPN->reserveOperandSpace(2);
+ RealPN->addIncoming(LHS.first, LHSBlock);
+ RealPN->addIncoming(RHS.first, RHSBlock);
+
+ // Create a PHI node for the imaginary part.
+ llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), "cond.i");
+ ImagPN->reserveOperandSpace(2);
+ ImagPN->addIncoming(LHS.second, LHSBlock);
+ ImagPN->addIncoming(RHS.second, RHSBlock);
+
+ return ComplexPairTy(RealPN, ImagPN);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreReal();
+ (void)Ignore;
+ assert(Ignore == false && "init list ignored");
+ Ignore = TestAndClearIgnoreImag();
+ (void)Ignore;
+ assert(Ignore == false && "init list ignored");
+ if (E->getNumInits())
+ return Visit(E->getInit(0));
+
+ // An empty init list initializes to null.
+ QualType Ty = E->getType()->getAs<ComplexType>()->getElementType();
+ const llvm::Type* LTy = CGF.ConvertType(Ty);
+ llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+ return ComplexPairTy(zeroConstant, zeroConstant);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(E, "complex va_arg expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+ // FIXME: Volatility.
+ return EmitLoadOfComplex(ArgPtr, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning the result as a real/imag pair.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+ bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+ IgnoreImagAssign)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+/// of complex type, storing into the specified Value*.
+void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+ ComplexExprEmitter Emitter(*this);
+ ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+ Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile);
+}
+
+/// StoreComplexToAddr - Store a complex number into the specified address.
+void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ ComplexExprEmitter(*this).EmitStoreOfComplex(V, DestAddr, DestIsVolatile);
+}
+
+/// LoadComplexFromAddr - Load a complex number from the specified address.
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+ bool SrcIsVolatile) {
+ return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
+}
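+
+// Usage sketch for the entry points above (illustrative):
+//
+//   ComplexPairTy V = CGF.EmitComplexExpr(E);     // {real, imag} Values
+//   CGF.StoreComplexToAddr(V, DestAddr, false);   // store the pair
+//   ComplexPairTy W = CGF.LoadComplexFromAddr(DestAddr, false);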
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 0000000..551a47a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,1181 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// ConstStructBuilder
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ConstStructBuilder {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+
+ bool Packed;
+ unsigned NextFieldOffsetInBytes;
+ unsigned LLVMStructAlignment;
+ std::vector<llvm::Constant *> Elements;
+public:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ InitListExpr *ILE);
+
+private:
+ ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
+ : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
+ LLVMStructAlignment(1) { }
+
+ bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitExpr);
+
+ bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitExpr);
+
+ void AppendPadding(uint64_t NumBytes);
+
+ void AppendTailPadding(uint64_t RecordSize);
+
+ void ConvertStructToPacked();
+
+ bool Build(InitListExpr *ILE);
+
+ unsigned getAlignment(const llvm::Constant *C) const {
+ if (Packed) return 1;
+ return CGM.getTargetData().getABITypeAlignment(C->getType());
+ }
+
+ uint64_t getSizeInBytes(const llvm::Constant *C) const {
+ return CGM.getTargetData().getTypeAllocSize(C->getType());
+ }
+};
+
+bool ConstStructBuilder::
+AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitCst) {
+ uint64_t FieldOffsetInBytes = FieldOffset / 8;
+
+ assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
+ && "Field offset mismatch!");
+
+ // Emit the field.
+ if (!InitCst)
+ return false;
+
+ unsigned FieldAlignment = getAlignment(InitCst);
+
+ // Round up the field offset to the alignment of the field type.
+ uint64_t AlignedNextFieldOffsetInBytes =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+
+ if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ }
+
+ if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
+
+ assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
+ "Did not add enough padding!");
+
+ AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ }
+
+ // Add the field.
+ Elements.push_back(InitCst);
+ NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes +
+ getSizeInBytes(InitCst);
+
+ if (Packed)
+ assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
+ else
+ LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+
+ return true;
+}
+
+bool ConstStructBuilder::
+ AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitCst) {
+ llvm::ConstantInt *CI = cast_or_null<llvm::ConstantInt>(InitCst);
+ // FIXME: Can this ever happen?
+ if (!CI)
+ return false;
+
+ if (FieldOffset > NextFieldOffsetInBytes * 8) {
+ // We need to add padding.
+ uint64_t NumBytes =
+ llvm::RoundUpToAlignment(FieldOffset -
+ NextFieldOffsetInBytes * 8, 8) / 8;
+
+ AppendPadding(NumBytes);
+ }
+
+ uint64_t FieldSize =
+ Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+ llvm::APInt FieldValue = CI->getValue();
+
+ // Promote the size of FieldValue if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (FieldSize > FieldValue.getBitWidth())
+ FieldValue.zext(FieldSize);
+
+ // Truncate the size of FieldValue to the bit field size.
+ if (FieldSize < FieldValue.getBitWidth())
+ FieldValue.trunc(FieldSize);
+
+ if (FieldOffset < NextFieldOffsetInBytes * 8) {
+ // Either part of the field or the entire field can go into the previous
+ // byte.
+ assert(!Elements.empty() && "Elements can't be empty!");
+
+ unsigned BitsInPreviousByte =
+ NextFieldOffsetInBytes * 8 - FieldOffset;
+
+ bool FitsCompletelyInPreviousByte =
+ BitsInPreviousByte >= FieldValue.getBitWidth();
+
+ llvm::APInt Tmp = FieldValue;
+
+ if (!FitsCompletelyInPreviousByte) {
+ unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ Tmp = Tmp.lshr(NewFieldWidth);
+ Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining high bits.
+ FieldValue.trunc(NewFieldWidth);
+ } else {
+ Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining low bits.
+ FieldValue = FieldValue.lshr(BitsInPreviousByte);
+ FieldValue.trunc(NewFieldWidth);
+ }
+ }
+
+ Tmp.zext(8);
+ if (CGM.getTargetData().isBigEndian()) {
+ if (FitsCompletelyInPreviousByte)
+ Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
+ } else {
+ Tmp = Tmp.shl(8 - BitsInPreviousByte);
+ }
+
+ // Or in the bits that go into the previous byte.
+ if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(Elements.back()))
+ Tmp |= Val->getValue();
+ else
+ assert(isa<llvm::UndefValue>(Elements.back()));
+
+ Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+
+ if (FitsCompletelyInPreviousByte)
+ return true;
+ }
+
+ while (FieldValue.getBitWidth() > 8) {
+ llvm::APInt Tmp;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ // We want the high bits.
+ Tmp = FieldValue;
+ Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
+ Tmp.trunc(8);
+ } else {
+ // We want the low bits.
+ Tmp = FieldValue;
+ Tmp.trunc(8);
+
+ FieldValue = FieldValue.lshr(8);
+ }
+
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
+ NextFieldOffsetInBytes++;
+
+ FieldValue.trunc(FieldValue.getBitWidth() - 8);
+ }
+
+ assert(FieldValue.getBitWidth() > 0 &&
+ "Should have at least one bit left!");
+ assert(FieldValue.getBitWidth() <= 8 &&
+ "Should not have more than a byte left!");
+
+ if (FieldValue.getBitWidth() < 8) {
+ if (CGM.getTargetData().isBigEndian()) {
+ unsigned BitWidth = FieldValue.getBitWidth();
+
+ FieldValue.zext(8);
+ FieldValue = FieldValue << (8 - BitWidth);
+ } else
+ FieldValue.zext(8);
+ }
+
+ // Append the last element.
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
+ FieldValue));
+ NextFieldOffsetInBytes++;
+ return true;
+}
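+
+// Worked example of the little-endian path above: for
+//
+//   struct { unsigned x : 3; unsigned y : 5; };
+//
+// with x = 5 (0b101) and y = 18 (0b10010), x is first appended as the byte
+// 0b00000101; y then fits completely in that byte's remaining five bits,
+// so the stored byte becomes (18 << 3) | 5 == 0b10010101 == 0x95.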
+
+void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
+ if (!NumBytes)
+ return;
+
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+ llvm::Constant *C = llvm::UndefValue::get(Ty);
+ Elements.push_back(C);
+ assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
+
+ NextFieldOffsetInBytes += getSizeInBytes(C);
+}
+
+void ConstStructBuilder::AppendTailPadding(uint64_t RecordSize) {
+ assert(RecordSize % 8 == 0 && "Invalid record size!");
+
+ uint64_t RecordSizeInBytes = RecordSize / 8;
+ assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+
+ unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+ AppendPadding(NumPadBytes);
+}
+
+void ConstStructBuilder::ConvertStructToPacked() {
+ std::vector<llvm::Constant *> PackedElements;
+ uint64_t ElementOffsetInBytes = 0;
+
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ llvm::Constant *C = Elements[i];
+
+ unsigned ElementAlign =
+ CGM.getTargetData().getABITypeAlignment(C->getType());
+ uint64_t AlignedElementOffsetInBytes =
+ llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
+
+ if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
+ // We need some padding.
+ uint64_t NumBytes =
+ AlignedElementOffsetInBytes - ElementOffsetInBytes;
+
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+ llvm::Constant *Padding = llvm::UndefValue::get(Ty);
+ PackedElements.push_back(Padding);
+ ElementOffsetInBytes += getSizeInBytes(Padding);
+ }
+
+ PackedElements.push_back(C);
+ ElementOffsetInBytes += getSizeInBytes(C);
+ }
+
+ assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
+ "Packing the struct changed its size!");
+
+ Elements = PackedElements;
+ LLVMStructAlignment = 1;
+ Packed = true;
+}
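+
+// Illustrative example of when the conversion above fires:
+//
+//   struct __attribute__((packed)) P { char c; int i; };
+//
+// The AST layout puts `i` at byte offset 1, but the natural ABI alignment
+// of i32 would round the LLVM offset up to 4, so AppendField repacks the
+// struct as the packed type <{ i8, i32 }> with no implicit padding.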
+
+bool ConstStructBuilder::Build(InitListExpr *ILE) {
+ RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ unsigned FieldNo = 0;
+ unsigned ElementNo = 0;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields, they just affect layout.
+ if (Field->isBitField() && !Field->getIdentifier())
+ continue;
+
+ // Get the initializer. A struct can include fields without
+ // initializers; we just use explicit null values for them.
+ llvm::Constant *EltInit;
+ if (ElementNo < ILE->getNumInits())
+ EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
+ Field->getType(), CGF);
+ else
+ EltInit = CGM.EmitNullConstant(Field->getType());
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
+ return false;
+ } else {
+ // Otherwise we have a bitfield.
+ if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
+ return false;
+ }
+ }
+
+ uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
+
+ if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
+ // If the struct is bigger than the size of the record type,
+ // we must have a flexible array member at the end.
+ assert(RD->hasFlexibleArrayMember() &&
+ "Must have flexible array member if struct is bigger than type!");
+
+ // No tail padding is necessary.
+ return true;
+ }
+
+ uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
+ LLVMStructAlignment);
+
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
+ LLVMSizeInBytes > LayoutSizeInBytes) {
+ assert(!Packed && "Size mismatch!");
+
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInBytes <= LayoutSizeInBytes &&
+ "Converting to packed did not help!");
+ }
+
+ // Append tail padding if necessary.
+ AppendTailPadding(Layout.getSize());
+
+ assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
+ "Tail padding mismatch!");
+
+ return true;
+}
+
+llvm::Constant *ConstStructBuilder::
+ BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return 0;
+
+ llvm::Constant *Result =
+ llvm::ConstantStruct::get(CGM.getLLVMContext(),
+ Builder.Elements, Builder.Packed);
+
+ assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
+ Builder.getAlignment(Result)) ==
+ Builder.getSizeInBytes(Result) && "Size mismatch!");
+
+ return Result;
+}
+
+
+//===----------------------------------------------------------------------===//
+// ConstExprEmitter
+//===----------------------------------------------------------------------===//
+
+class ConstExprEmitter :
+ public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+ llvm::LLVMContext &VMContext;
+public:
+ ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
+ : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ llvm::Constant *VisitStmt(Stmt *S) {
+ return 0;
+ }
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+
+ llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return Visit(E->getInitializer());
+ }
+
+ llvm::Constant *EmitMemberFunctionPointer(CXXMethodDecl *MD) {
+ assert(MD->isInstance() && "Member function must not be static!");
+
+ MD = MD->getCanonicalDecl();
+
+ const llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ llvm::Constant *Values[2];
+
+ // Get the function pointer (or index if this is a virtual function).
+ if (MD->isVirtual()) {
+ uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
+
+ // FIXME: We shouldn't use / 8 here.
+ uint64_t PointerWidthInBytes =
+ CGM.getContext().Target.getPointerWidth(0) / 8;
+
+ // Itanium C++ ABI 2.3:
+ // For a non-virtual function, this field is a simple function pointer.
+ // For a virtual function, it is 1 plus the virtual table offset
+ // (in bytes) of the function, represented as a ptrdiff_t.
+ Values[0] = llvm::ConstantInt::get(PtrDiffTy,
+ (Index * PointerWidthInBytes) + 1);
+ } else {
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+
+ llvm::Constant *FuncPtr = CGM.GetAddrOfFunction(MD, Ty);
+ Values[0] = llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
+ }
+
+ // The adjustment will always be 0.
+ Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0);
+
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(),
+ Values, 2, /*Packed=*/false);
+ }
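+
+  // Illustrative values for the pair built above, assuming an 8-byte
+  // pointer width (the representation itself is Itanium C++ ABI 2.3):
+  //
+  //   struct A { virtual void f(); virtual void g(); void h(); };
+  //
+  //   &A::g (virtual, vtable index 1)  ->  { ptrdiff_t(1 * 8 + 1), 0 }
+  //   &A::h (non-virtual)              ->  { ptrtoint(&A::h),      0 }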
+
+ llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
+ if (const MemberPointerType *MPT =
+ E->getType()->getAs<MemberPointerType>()) {
+ QualType T = MPT->getPointeeType();
+ DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
+
+ NamedDecl *ND = DRE->getDecl();
+ if (T->isFunctionProtoType())
+ return EmitMemberFunctionPointer(cast<CXXMethodDecl>(ND));
+
+ // We have a pointer to data member.
+ return CGM.EmitPointerToDataMember(cast<FieldDecl>(ND));
+ }
+
+ return 0;
+ }
+
+ llvm::Constant *VisitBinSub(BinaryOperator *E) {
+    // This must be a pointer-pointer subtraction, which only happens for
+    // address-of-label expressions.
+ if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
+ !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
+ return 0;
+
+ llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
+ E->getLHS()->getType(), CGF);
+ llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
+ E->getRHS()->getType(), CGF);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
+ RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
+
+    // No need to divide by the element size: the address of a label is always
+    // void*, and GNU C defines sizeof(void) as 1 for pointer arithmetic.
+ return llvm::ConstantExpr::getSub(LHS, RHS);
+ }
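+
+  // The GNU construct this folds looks like (a sketch; both labels must be
+  // in the same function):
+  //
+  //   first:  ;
+  //   second: ;
+  //   static long d = &&second - &&first;
+  //
+  // Both operands are label addresses of type void*, so the ptrtoint'ed
+  // difference needs no division by an element size.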
+
+ llvm::Constant *VisitCastExpr(CastExpr* E) {
+ switch (E->getCastKind()) {
+ case CastExpr::CK_ToUnion: {
+ // GCC cast to union extension
+ assert(E->getType()->isUnionType() &&
+ "Destination type is not union type!");
+ const llvm::Type *Ty = ConvertType(E->getType());
+ Expr *SubExpr = E->getSubExpr();
+
+ llvm::Constant *C =
+ CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+ if (!C)
+ return 0;
+
+      // Build a struct with the union sub-element as the first member,
+      // padded with undef bytes to the full size of the union.
+ std::vector<llvm::Constant*> Elts;
+ std::vector<const llvm::Type*> Types;
+ Elts.push_back(C);
+ Types.push_back(C->getType());
+ unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
+
+ assert(CurSize <= TotalSize && "Union size mismatch!");
+ if (unsigned NumPadBytes = TotalSize - CurSize) {
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ if (NumPadBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPadBytes);
+
+ Elts.push_back(llvm::UndefValue::get(Ty));
+ Types.push_back(Ty);
+ }
+
+ llvm::StructType* STy =
+ llvm::StructType::get(C->getType()->getContext(), Types, false);
+ return llvm::ConstantStruct::get(STy, Elts);
+ }
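+
+    // For reference, the CK_ToUnion case above implements the GCC
+    // cast-to-union extension, e.g.:
+    //
+    //   union U { int i; float f; };
+    //   union U u = (union U)1;   // initializes u.i; any bytes beyond the
+    //                             // sub-element are filled with undef
+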
+ case CastExpr::CK_NullToMemberPointer:
+ return CGM.EmitNullConstant(E->getType());
+
+ case CastExpr::CK_BaseToDerivedMemberPointer: {
+ Expr *SubExpr = E->getSubExpr();
+
+ const MemberPointerType *SrcTy =
+ SubExpr->getType()->getAs<MemberPointerType>();
+ const MemberPointerType *DestTy =
+ E->getType()->getAs<MemberPointerType>();
+
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXRecordDecl>(cast<RecordType>(DestTy->getClass())->getDecl());
+
+ if (SrcTy->getPointeeType()->isFunctionProtoType()) {
+ llvm::Constant *C =
+ CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+ if (!C)
+ return 0;
+
+ llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
+
+ // Check if we need to update the adjustment.
+ if (llvm::Constant *Offset =
+ CGM.GetNonVirtualBaseClassOffset(DerivedClass, E->getBasePath())) {
+ llvm::Constant *Values[2];
+
+ Values[0] = CS->getOperand(0);
+ Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
+ /*Packed=*/false);
+ }
+
+ return CS;
+ }
+ }
+
+ case CastExpr::CK_BitCast:
+ // This must be a member function pointer cast.
+ return Visit(E->getSubExpr());
+
+ default: {
+ // FIXME: This should be handled by the CK_NoOp cast kind.
+ // Explicit and implicit no-op casts
+ QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
+ if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
+ return Visit(E->getSubExpr());
+
+ // Handle integer->integer casts for address-of-label differences.
+ if (Ty->isIntegerType() && SubTy->isIntegerType() &&
+ CGF) {
+ llvm::Value *Src = Visit(E->getSubExpr());
+ if (Src == 0) return 0;
+
+ // Use EmitScalarConversion to perform the conversion.
+ return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
+ }
+
+ return 0;
+ }
+ }
+ }
+
+ llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+
+ llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
+ unsigned NumInitElements = ILE->getNumInits();
+ if (NumInitElements == 1 &&
+ (isa<StringLiteral>(ILE->getInit(0)) ||
+ isa<ObjCEncodeExpr>(ILE->getInit(0))))
+ return Visit(ILE->getInit(0));
+
+ std::vector<llvm::Constant*> Elts;
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(ConvertType(ILE->getType()));
+ const llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+    // Initializing an array requires us to automatically initialize any
+    // elements that were not given an explicit initializer.
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ unsigned i = 0;
+ bool RewriteType = false;
+ for (; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return 0;
+ RewriteType |= (C->getType() != ElemTy);
+ Elts.push_back(C);
+ }
+
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ for (; i < NumElements; ++i)
+ Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+
+ if (RewriteType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<const llvm::Type*> Types;
+ for (unsigned i = 0; i < Elts.size(); ++i)
+ Types.push_back(Elts[i]->getType());
+ const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
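+
+  // Example of the implicit fill performed above:
+  //
+  //   int a[4] = { 1, 2 };   // emitted as [4 x i32] [ 1, 2, 0, 0 ]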
+
+ llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
+ return CGM.EmitNullConstant(E->getType());
+ }
+
+ llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->getType()->isScalarType()) {
+ // We have a scalar in braces. Just use the first element.
+ if (ILE->getNumInits() > 0) {
+ Expr *Init = ILE->getInit(0);
+ return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ }
+ return CGM.EmitNullConstant(ILE->getType());
+ }
+
+ if (ILE->getType()->isArrayType())
+ return EmitArrayInitialization(ILE);
+
+ if (ILE->getType()->isRecordType())
+ return EmitStructInitialization(ILE);
+
+ if (ILE->getType()->isUnionType())
+ return EmitUnionInitialization(ILE);
+
+ // If ILE was a constant vector, we would have handled it already.
+ if (ILE->getType()->isVectorType())
+ return 0;
+
+ assert(0 && "Unable to handle InitListExpr");
+    // Silence the "control reaches end of non-void function" warning.
+    // Not reached.
+ return 0;
+ }
+
+ llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (!E->getConstructor()->isTrivial())
+ return 0;
+
+ QualType Ty = E->getType();
+
+ // FIXME: We should not have to call getBaseElementType here.
+ const RecordType *RT =
+ CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // If the class doesn't have a trivial destructor, we can't emit it as a
+ // constant expr.
+ if (!RD->hasTrivialDestructor())
+ return 0;
+
+ // Only copy and default constructors can be trivial.
+
+ if (E->getNumArgs()) {
+ assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ assert(E->getConstructor()->isCopyConstructor() &&
+ "trivial ctor has argument but isn't a copy ctor");
+
+ Expr *Arg = E->getArg(0);
+ assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
+ "argument to copy ctor is of wrong type");
+
+ return Visit(Arg);
+ }
+
+ return CGM.EmitNullConstant(Ty);
+ }
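+
+  // Sketch of the two foldable forms handled above, for a class with a
+  // trivial constructor and trivial destructor:
+  //
+  //   struct P { int x, y; };
+  //   P()        ->  EmitNullConstant(P), i.e. zeroinitializer
+  //   P(other)   ->  Visit(other)         // trivial copy construction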
+
+ llvm::Constant *VisitStringLiteral(StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // This must be a string initializing an array in a static initializer.
+    // Don't emit it as the address of the string; emit the string data itself
+ // as an inline array.
+ return llvm::ConstantArray::get(VMContext,
+ CGM.GetStringForStringLiteral(E), false);
+ }
+
+ llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ // This must be an @encode initializing an array in a static initializer.
+    // Don't emit it as the address of the string; emit the string data itself
+ // as an inline array.
+ std::string Str;
+ CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
+
+ // Resize the string to the right size, adding zeros at the end, or
+ // truncating as needed.
+ Str.resize(CAT->getSize().getZExtValue(), '\0');
+ return llvm::ConstantArray::get(VMContext, Str, false);
+ }
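+
+  // Example (Objective-C): the encoding string is emitted inline, sized to
+  // the array it initializes:
+  //
+  //   char e[8] = @encode(int);   // "i" resized to 8 bytes with trailing NULs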
+
+ llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // Utility methods
+ const llvm::Type *ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+ }
+
+public:
+ llvm::Constant *EmitLValue(Expr *E) {
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = Visit(CLE->getInitializer());
+ // FIXME: "Leaked" on failure.
+ if (C)
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ E->getType().isConstant(CGM.getContext()),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", 0, false,
+ E->getType().getAddressSpace());
+ return C;
+ }
+ case Expr::DeclRefExprClass: {
+ ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
+ if (Decl->hasAttr<WeakRefAttr>())
+ return CGM.GetWeakRefReference(Decl);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
+ return CGM.GetAddrOfFunction(FD);
+ if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
+ // We can never refer to a variable with local storage.
+ if (!VD->hasLocalStorage()) {
+ if (VD->isFileVarDecl() || VD->hasExternalStorage())
+ return CGM.GetAddrOfGlobalVar(VD);
+ else if (VD->isBlockVarDecl()) {
+ assert(CGF && "Can't access static local vars without CGF");
+ return CGF->GetAddrOfStaticLocalVar(VD);
+ }
+ }
+ }
+ break;
+ }
+ case Expr::StringLiteralClass:
+ return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
+ case Expr::ObjCStringLiteralClass: {
+ ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(SL->getString());
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ }
+ case Expr::PredefinedExprClass: {
+ unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
+ if (CGF) {
+ LValue Res = CGF->EmitPredefinedFunctionName(Type);
+ return cast<llvm::Constant>(Res.getAddress());
+ } else if (Type == PredefinedExpr::PrettyFunction) {
+ return CGM.GetAddrOfConstantCString("top level", ".tmp");
+ }
+
+ return CGM.GetAddrOfConstantCString("", ".tmp");
+ }
+ case Expr::AddrLabelExprClass: {
+ assert(CGF && "Invalid address of label expression outside function.");
+ llvm::Constant *Ptr =
+ CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ }
+ case Expr::CallExprClass: {
+ CallExpr* CE = cast<CallExpr>(E);
+ unsigned builtin = CE->isBuiltinCall(CGM.getContext());
+ if (builtin !=
+ Builtin::BI__builtin___CFStringMakeConstantString &&
+ builtin !=
+ Builtin::BI__builtin___NSStringMakeConstantString)
+ break;
+ const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
+ const StringLiteral *Literal = cast<StringLiteral>(Arg);
+ if (builtin ==
+ Builtin::BI__builtin___NSStringMakeConstantString) {
+ return CGM.getObjCRuntime().GenerateConstantString(Literal);
+ }
+ // FIXME: need to deal with UCN conversion issues.
+ return CGM.GetAddrOfConstantCFString(Literal);
+ }
+ case Expr::BlockExprClass: {
+ std::string FunctionName;
+ if (CGF)
+ FunctionName = CGF->CurFn->getName();
+ else
+ FunctionName = "global";
+
+ return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ }
+ }
+
+ return 0;
+ }
+};
+
+} // end anonymous namespace.
+
+llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ Expr::EvalResult Result;
+
+ bool Success = false;
+
+ if (DestType->isReferenceType())
+ Success = E->EvaluateAsLValue(Result, Context);
+ else
+ Success = E->Evaluate(Result, Context);
+
+ if (Success && !Result.HasSideEffects) {
+ switch (Result.Val.getKind()) {
+ case APValue::Uninitialized:
+ assert(0 && "Constant expressions should be initialized.");
+ return 0;
+ case APValue::LValue: {
+ const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ Result.Val.getLValueOffset().getQuantity());
+
+ llvm::Constant *C;
+ if (const Expr *LVBase = Result.Val.getLValueBase()) {
+ C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
+
+ // Apply offset if necessary.
+ if (!Offset->isNullValue()) {
+ const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
+ C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
+ }
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+        // If the types don't match, this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
+ return C;
+ }
+ }
+ case APValue::Int: {
+ llvm::Constant *C = llvm::ConstantInt::get(VMContext,
+ Result.Val.getInt());
+
+ if (C->getType() == llvm::Type::getInt1Ty(VMContext)) {
+ const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+ }
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(VMContext,
+ Result.Val.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(VMContext,
+ Result.Val.getComplexIntImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+ }
+ case APValue::Float:
+ return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
+ Result.Val.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(VMContext,
+ Result.Val.getComplexFloatImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+ }
+ case APValue::Vector: {
+ llvm::SmallVector<llvm::Constant *, 4> Inits;
+ unsigned NumElts = Result.Val.getVectorLength();
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ APValue &Elt = Result.Val.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
+ else
+ Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+ }
+ return llvm::ConstantVector::get(&Inits[0], Inits.size());
+ }
+ }
+ }
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) {
+ const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+static void
+FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
+ std::vector<llvm::Constant *> &Elements,
+ uint64_t StartOffset) {
+ assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");
+
+ if (!CGM.getTypes().ContainsPointerToDataMember(T))
+ return;
+
+ if (const ConstantArrayType *CAT =
+ CGM.getContext().getAsConstantArrayType(T)) {
+ QualType ElementTy = CAT->getElementType();
+ uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
+
+ for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
+ FillInNullDataMemberPointers(CGM, ElementTy, Elements,
+ StartOffset + I * ElementSize);
+ }
+ } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ // Go through all bases and fill in any null pointer to data members.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->isVirtual() && "Should not see virtual bases here!");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (BaseDecl->isEmpty())
+ continue;
+
+ // Ignore bases that don't have any pointer to data members.
+ if (!CGM.getTypes().ContainsPointerToDataMember(BaseDecl))
+ continue;
+
+ uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ FillInNullDataMemberPointers(CGM, I->getType(),
+ Elements, StartOffset + BaseOffset);
+ }
+
+ // Visit all fields.
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++FieldNo) {
+ QualType FieldType = I->getType();
+
+ if (!CGM.getTypes().ContainsPointerToDataMember(FieldType))
+ continue;
+
+ uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
+ FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
+ }
+ } else {
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
+
+ uint64_t StartIndex = StartOffset / 8;
+ uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;
+
+ llvm::Constant *NegativeOne =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
+ -1ULL, /*isSigned=*/true);
+
+ // Fill in the null data member pointer.
+ for (uint64_t I = StartIndex; I != EndIndex; ++I)
+ Elements[I] = NegativeOne;
+ }
+}
+
+llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ if (!getTypes().ContainsPointerToDataMember(T))
+ return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
+
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+
+ QualType ElementTy = CAT->getElementType();
+
+ llvm::Constant *Element = EmitNullConstant(ElementTy);
+ unsigned NumElements = CAT->getSize().getZExtValue();
+ std::vector<llvm::Constant *> Array(NumElements);
+ for (unsigned i = 0; i != NumElements; ++i)
+ Array[i] = Element;
+
+ const llvm::ArrayType *ATy =
+ cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+ return llvm::ConstantArray::get(ATy, Array);
+ }
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
+ unsigned NumElements = STy->getNumElements();
+ std::vector<llvm::Constant *> Elements(NumElements);
+
+ const CGRecordLayout &Layout = getTypes().getCGRecordLayout(RD);
+
+ // Go through all bases and fill in any null pointer to data members.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->isVirtual() && "Should not see virtual bases here!");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (BaseDecl->isEmpty())
+ continue;
+
+ // Ignore bases that don't have any pointer to data members.
+ if (!getTypes().ContainsPointerToDataMember(BaseDecl))
+ continue;
+
+ // Currently, all bases are arrays of i8. Figure out how many elements
+ // this base array has.
+ unsigned BaseFieldNo = Layout.getNonVirtualBaseLLVMFieldNo(BaseDecl);
+ const llvm::ArrayType *BaseArrayTy =
+ cast<llvm::ArrayType>(STy->getElementType(BaseFieldNo));
+
+ unsigned NumBaseElements = BaseArrayTy->getNumElements();
+ std::vector<llvm::Constant *> BaseElements(NumBaseElements);
+
+ // Now fill in null data member pointers.
+ FillInNullDataMemberPointers(*this, I->getType(), BaseElements, 0);
+
+ // Now go through all other elements and zero them out.
+ if (NumBaseElements) {
+ llvm::Constant *Zero =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(getLLVMContext()), 0);
+
+ for (unsigned I = 0; I != NumBaseElements; ++I) {
+ if (!BaseElements[I])
+ BaseElements[I] = Zero;
+ }
+ }
+
+ Elements[BaseFieldNo] = llvm::ConstantArray::get(BaseArrayTy,
+ BaseElements);
+ }
+
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I) {
+ const FieldDecl *FD = *I;
+ unsigned FieldNo = Layout.getLLVMFieldNo(FD);
+ Elements[FieldNo] = EmitNullConstant(FD->getType());
+ }
+
+ // Now go through all other fields and zero them out.
+ for (unsigned i = 0; i != NumElements; ++i) {
+ if (!Elements[i])
+ Elements[i] = llvm::Constant::getNullValue(STy->getElementType(i));
+ }
+
+ return llvm::ConstantStruct::get(STy, Elements);
+ }
+
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1ULL,
+ /*isSigned=*/true);
+}
+
+llvm::Constant *
+CodeGenModule::EmitPointerToDataMember(const FieldDecl *FD) {
+
+ // Itanium C++ ABI 2.3:
+ // A pointer to data member is an offset from the base address of the class
+ // object containing it, represented as a ptrdiff_t
+
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(FD->getParent());
+ QualType ClassType =
+ getContext().getTypeDeclType(const_cast<CXXRecordDecl *>(ClassDecl));
+
+ const llvm::StructType *ClassLTy =
+ cast<llvm::StructType>(getTypes().ConvertType(ClassType));
+
+ const CGRecordLayout &RL =
+ getTypes().getCGRecordLayout(FD->getParent());
+ unsigned FieldNo = RL.getLLVMFieldNo(FD);
+ uint64_t Offset =
+ getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);
+
+ const llvm::Type *PtrDiffTy =
+ getTypes().ConvertType(getContext().getPointerDiffType());
+
+ return llvm::ConstantInt::get(PtrDiffTy, Offset);
+}
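+
+// Illustrative result for the function above, assuming a 4-byte int:
+//
+//   struct S { int a; int b; };
+//   int S::*p = &S::b;   // emitted as ptrdiff_t 4 (offset of b in S);
+//                        // the null value, by contrast, is emitted as -1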
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
new file mode 100644
index 0000000..2108414
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -0,0 +1,2006 @@
+//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::Value;
+
+//===----------------------------------------------------------------------===//
+// Scalar Expression Emitter
+//===----------------------------------------------------------------------===//
+
+struct BinOpInfo {
+ Value *LHS;
+ Value *RHS;
+ QualType Ty; // Computation Type.
+ const BinaryOperator *E;
+};
+
+namespace {
+class ScalarExprEmitter
+ : public StmtVisitor<ScalarExprEmitter, Value*> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ bool IgnoreResultAssign;
+ llvm::LLVMContext &VMContext;
+public:
+
+ ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
+ VMContext(cgf.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreResultAssign() {
+ bool I = IgnoreResultAssign;
+ IgnoreResultAssign = false;
+ return I;
+ }
+
+ const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
+ LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
+
+ Value *EmitLoadOfLValue(LValue LV, QualType T) {
+ return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
+ }
+
+  /// EmitLoadOfLValue - Given an expression with scalar type that represents
+  /// an l-value, this method emits the address of the l-value, then loads
+  /// and returns the result.
+ Value *EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(EmitCheckedLValue(E), E->getType());
+ }
+
+ /// EmitConversionToBool - Convert the specified expression value to a
+ /// boolean (i1) truth value. This is equivalent to "Val != 0".
+ Value *EmitConversionToBool(Value *Src, QualType DstTy);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy);
+
+ /// EmitNullValue - Emit a value that corresponds to null for the given type.
+ Value *EmitNullValue(QualType Ty);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ Value *VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ assert(0 && "Stmt can't have complex result type!");
+ return 0;
+ }
+ Value *VisitExpr(Expr *S);
+
+ Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+
+ // Leaves.
+ Value *VisitIntegerLiteral(const IntegerLiteral *E) {
+ return llvm::ConstantInt::get(VMContext, E->getValue());
+ }
+ Value *VisitFloatingLiteral(const FloatingLiteral *E) {
+ return llvm::ConstantFP::get(VMContext, E->getValue());
+ }
+ Value *VisitCharacterLiteral(const CharacterLiteral *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitGNUNullExpr(const GNUNullExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),
+ CGF.getContext().typesAreCompatible(
+ E->getArgType1(), E->getArgType2()));
+ }
+ Value *VisitOffsetOfExpr(const OffsetOfExpr *E);
+ Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
+ Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+ llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
+ return Builder.CreateBitCast(V, ConvertType(E->getType()));
+ }
+
+ // l-values.
+ Value *VisitDeclRefExpr(DeclRefExpr *E) {
+ Expr::EvalResult Result;
+ if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
+ assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
+ return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ }
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return CGF.EmitObjCSelectorExpr(E);
+ }
+ Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return CGF.EmitObjCProtocolExpr(E);
+ }
+ Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getScalarVal();
+ }
+
+ Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ LValue LV = CGF.EmitObjCIsaExpr(E);
+ Value *V = CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
+ return V;
+ }
+
+ Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ Value *VisitMemberExpr(MemberExpr *E);
+ Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitInitListExpr(InitListExpr *E);
+
+ Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return CGF.CGM.EmitNullConstant(E->getType());
+ }
+ Value *VisitCastExpr(CastExpr *E) {
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (E->getType()->isVariablyModifiedType())
+ CGF.EmitVLASize(E->getType());
+
+ return EmitCastExpr(E);
+ }
+ Value *EmitCastExpr(CastExpr *E);
+
+ Value *VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getScalarVal();
+ }
+
+ Value *VisitStmtExpr(const StmtExpr *E);
+
+ Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
+
+ // Unary Operators.
+ Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return CGF.EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+ }
+ Value *VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ Value *VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ Value *VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ Value *VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+ return EmitLValue(E->getSubExpr()).getAddress();
+ }
+ Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitUnaryPlus(const UnaryOperator *E) {
+    // This differs from gcc, most likely due to a bug in gcc.
+ TestAndClearIgnoreResultAssign();
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryMinus (const UnaryOperator *E);
+ Value *VisitUnaryNot (const UnaryOperator *E);
+ Value *VisitUnaryLNot (const UnaryOperator *E);
+ Value *VisitUnaryReal (const UnaryOperator *E);
+ Value *VisitUnaryImag (const UnaryOperator *E);
+ Value *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryOffsetOf(const UnaryOperator *E);
+
+ // C++
+ Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ Value *VisitCXXThisExpr(CXXThisExpr *TE) {
+ return CGF.LoadCXXThis();
+ }
+
+ Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
+ }
+ Value *VisitCXXNewExpr(const CXXNewExpr *E) {
+ return CGF.EmitCXXNewExpr(E);
+ }
+ Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ CGF.EmitCXXDeleteExpr(E);
+ return 0;
+ }
+ Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt1Ty(),
+ E->EvaluateTrait(CGF.getContext()));
+ }
+
+ Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ CGF.EmitScalarExpr(E->getBase());
+ return 0;
+ }
+
+ Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+
+ Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ CGF.EmitCXXThrowExpr(E);
+ return 0;
+ }
+
+ // Binary Operators.
+ Value *EmitMul(const BinOpInfo &Ops) {
+ if (CGF.getContext().getLangOptions().OverflowChecking
+ && Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ }
+ /// Create a binary op that checks for overflow.
+ /// Currently only supports +, - and *.
+ Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+ Value *EmitDiv(const BinOpInfo &Ops);
+ Value *EmitRem(const BinOpInfo &Ops);
+ Value *EmitAdd(const BinOpInfo &Ops);
+ Value *EmitSub(const BinOpInfo &Ops);
+ Value *EmitShl(const BinOpInfo &Ops);
+ Value *EmitShr(const BinOpInfo &Ops);
+ Value *EmitAnd(const BinOpInfo &Ops) {
+ return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
+ }
+ Value *EmitXor(const BinOpInfo &Ops) {
+ return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
+ }
+ Value *EmitOr (const BinOpInfo &Ops) {
+ return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
+ }
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
+ Value *&BitFieldResult);
+
+ Value *EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+
+ // Binary operators and binary compound assignment operators.
+#define HANDLEBINOP(OP) \
+ Value *VisitBin ## OP(const BinaryOperator *E) { \
+ return Emit ## OP(EmitBinOps(E)); \
+ } \
+ Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
+ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
+ }
+ HANDLEBINOP(Mul)
+ HANDLEBINOP(Div)
+ HANDLEBINOP(Rem)
+ HANDLEBINOP(Add)
+ HANDLEBINOP(Sub)
+ HANDLEBINOP(Shl)
+ HANDLEBINOP(Shr)
+ HANDLEBINOP(And)
+ HANDLEBINOP(Xor)
+ HANDLEBINOP(Or)
+#undef HANDLEBINOP
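+
+  // For instance, HANDLEBINOP(Mul) above expands to:
+  //
+  //   Value *VisitBinMul(const BinaryOperator *E) {
+  //     return EmitMul(EmitBinOps(E));
+  //   }
+  //   Value *VisitBinMulAssign(const CompoundAssignOperator *E) {
+  //     return EmitCompoundAssign(E, &ScalarExprEmitter::EmitMul);
+  //   }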
+
+ // Comparisons.
+ Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc);
+#define VISITCOMP(CODE, UI, SI, FP) \
+ Value *VisitBin##CODE(const BinaryOperator *E) { \
+ return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
+ llvm::FCmpInst::FP); }
+ VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
+ VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
+ VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
+ VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
+ VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
+ VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
+#undef VISITCOMP
+
+ Value *VisitBinAssign (const BinaryOperator *E);
+
+ Value *VisitBinLAnd (const BinaryOperator *E);
+ Value *VisitBinLOr (const BinaryOperator *E);
+ Value *VisitBinComma (const BinaryOperator *E);
+
+ Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // Other Operators.
+ Value *VisitBlockExpr(const BlockExpr *BE);
+ Value *VisitConditionalOperator(const ConditionalOperator *CO);
+ Value *VisitChooseExpr(ChooseExpr *CE);
+ Value *VisitVAArgExpr(VAArgExpr *VE);
+ Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+ return CGF.EmitObjCStringLiteral(E);
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitConversionToBool - Convert the specified expression value to a
+/// boolean (i1) truth value. This is equivalent to "Val != 0".
+Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
+ assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
+
+ if (SrcType->isRealFloatingType()) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+ return Builder.CreateFCmpUNE(Src, Zero, "tobool");
+ }
+
+ if (SrcType->isMemberPointerType()) {
+ // Compare against -1.
+ llvm::Value *NegativeOne = llvm::Constant::getAllOnesValue(Src->getType());
+ return Builder.CreateICmpNE(Src, NegativeOne, "tobool");
+ }
+
+ assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
+ "Unknown scalar type to convert");
+
+ // Because of the type rules of C, we often end up computing a logical value,
+ // then zero extending it to int, then wanting it as a logical value again.
+ // Optimize this common case.
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
+ if (ZI->getOperand(0)->getType() ==
+ llvm::Type::getInt1Ty(CGF.getLLVMContext())) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ // Compare against an integer or pointer null.
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+ return Builder.CreateICmpNE(Src, Zero, "tobool");
+}
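+
+// Summary of the comparisons selected above (illustrative):
+//
+//   floating point:        fcmp une %src, 0.0
+//   data member pointer:   icmp ne  %src, -1        ; Itanium null value
+//   integer or pointer:    icmp ne  %src, 0 or null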
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType) {
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ if (DstType->isVoidType()) return 0;
+
+ llvm::LLVMContext &VMContext = CGF.getLLVMContext();
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstType->isBooleanType())
+ return EmitConversionToBool(Src, SrcType);
+
+ const llvm::Type *DstTy = ConvertType(DstType);
+
+ // Ignore conversions like int -> uint.
+ if (Src->getType() == DstTy)
+ return Src;
+
+ // Handle pointer conversions next: pointers can only be converted to/from
+ // other pointers and integers. Check for pointer types in terms of LLVM, as
+ // some native types (like Obj-C id) may map to a pointer type.
+ if (isa<llvm::PointerType>(DstTy)) {
+ // The source value may be an integer, or a pointer.
+ if (isa<llvm::PointerType>(Src->getType()))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ const llvm::Type *MiddleTy =
+ llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ bool InputSigned = SrcType->isSignedIntegerType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+ // Then, cast to pointer.
+ return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
+ }
+
+ if (isa<llvm::PointerType>(Src->getType())) {
+    // Must be a ptr-to-int cast.
+ assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
+ return Builder.CreatePtrToInt(Src, DstTy, "conv");
+ }
+
+ // A scalar can be splatted to an extended vector of the same element type
+ if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
+ // Cast the scalar to element type
+ QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
+ llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+
+ // Splat the element across to all elements
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ for (unsigned i = 0; i < NumElements; i++)
+ Args.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 0));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+
+ // Allow bitcast from vector to integer/fp of the same size.
+ if (isa<llvm::VectorType>(Src->getType()) ||
+ isa<llvm::VectorType>(DstTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ // Finally, we have the arithmetic types: real int/float.
+ if (isa<llvm::IntegerType>(Src->getType())) {
+ bool InputSigned = SrcType->isSignedIntegerType();
+ if (isa<llvm::IntegerType>(DstTy))
+ return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ return Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ return Builder.CreateUIToFP(Src, DstTy, "conv");
+ }
+
+ assert(Src->getType()->isFloatingPointTy() && "Unknown real conversion");
+ if (isa<llvm::IntegerType>(DstTy)) {
+ if (DstType->isSignedIntegerType())
+ return Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ return Builder.CreateFPToUI(Src, DstTy, "conv");
+ }
+
+ assert(DstTy->isFloatingPointTy() && "Unknown real conversion");
+ if (DstTy->getTypeID() < Src->getType()->getTypeID())
+ return Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ return Builder.CreateFPExt(Src, DstTy, "conv");
+}
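+
+// A few of the conversions selected above (illustrative):
+//
+//   int   -> float     sitofp (uitofp if the source is unsigned)
+//   float -> int       fptosi (fptoui if the destination is unsigned)
+//   float -> double    fpext  (fptrunc when narrowing)
+//   int   -> T*        cast to pointer-width integer, then inttoptr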
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *ScalarExprEmitter::
+EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy) {
+ // Get the source element type.
+ SrcTy = SrcTy->getAs<ComplexType>()->getElementType();
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstTy->isBooleanType()) {
+ // Complex != 0 -> (Real != 0) | (Imag != 0)
+ Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
+ Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
+ return Builder.CreateOr(Src.first, Src.second, "tobool");
+ }
+
+ // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
+ // the imaginary part of the complex value is discarded and the value of the
+ // real part is converted according to the conversion rules for the
+  // corresponding real type."
+ return EmitScalarConversion(Src.first, SrcTy, DstTy);
+}
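+
+// Example of the rules above:
+//
+//   _Complex double z;
+//   double d = z;   // C99: the real part of z, converted to double
+//   _Bool   b = z;  // (real != 0) | (imag != 0)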
+
+Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
+ const llvm::Type *LTy = ConvertType(Ty);
+
+ if (!Ty->isMemberPointerType())
+ return llvm::Constant::getNullValue(LTy);
+
+ assert(!Ty->isMemberFunctionPointerType() &&
+ "member function pointers are not scalar!");
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ return llvm::ConstantInt::get(LTy, -1ULL, /*isSigned=*/true);
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "scalar expression");
+ if (E->getType()->isVoidType())
+ return 0;
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ llvm::SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
+ indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
+ }
+ Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
+ Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+ Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
+ return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+}
+Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
+ Expr::EvalResult Result;
+ if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
+ if (E->isArrow())
+ CGF.EmitScalarExpr(E->getBase());
+ else
+ EmitLValue(E->getBase());
+ return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ }
+ return EmitLoadOfLValue(E);
+}
+
+Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+  // Emit subscript expressions in rvalue contexts. For most cases, this just
+ // loads the lvalue formed by the subscript expr. However, we have to be
+ // careful, because the base of a vector subscript is occasionally an rvalue,
+ // so we can't get it as an lvalue.
+ if (!E->getBase()->getType()->isVectorType())
+ return EmitLoadOfLValue(E);
+
+ // Handle the vector case. The base must be a vector, the index must be an
+ // integer value.
+ Value *Base = Visit(E->getBase());
+ Value *Idx = Visit(E->getIdx());
+ bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
+ Idx = Builder.CreateIntCast(Idx,
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()),
+ IdxSigned,
+ "vecidxcast");
+ return Builder.CreateExtractElement(Base, Idx, "vecext");
+}
+
+static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
+ unsigned Off, const llvm::Type *I32Ty) {
+ int MV = SVI->getMaskValue(Idx);
+ if (MV == -1)
+ return llvm::UndefValue::get(I32Ty);
+ return llvm::ConstantInt::get(I32Ty, Off+MV);
+}
+
+Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ (void)Ignore;
+ assert (Ignore == false && "init list ignored");
+ unsigned NumInitElements = E->getNumInits();
+
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ const llvm::VectorType *VType =
+ dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
+
+ // We have a scalar in braces. Just use the first element.
+ if (!VType)
+ return Visit(E->getInit(0));
+
+ unsigned ResElts = VType->getNumElements();
+ const llvm::Type *I32Ty = llvm::Type::getInt32Ty(CGF.getLLVMContext());
+
+ // Loop over initializers collecting the Value for each, and remembering
+  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
+ // us to fold the shuffle for the swizzle into the shuffle for the vector
+ // initializer, since LLVM optimizers generally do not want to touch
+ // shuffles.
+ unsigned CurIdx = 0;
+ bool VIsUndefShuffle = false;
+ llvm::Value *V = llvm::UndefValue::get(VType);
+ for (unsigned i = 0; i != NumInitElements; ++i) {
+ Expr *IE = E->getInit(i);
+ Value *Init = Visit(IE);
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+
+ const llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
+
+ // Handle scalar elements. If the scalar initializer is actually one
+ // element of a different vector of the same width, use shuffle instead of
+ // extract+insert.
+ if (!VVT) {
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
+
+ if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+ llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
+ Value *LHS = 0, *RHS = 0;
+ if (CurIdx == 0) {
+ // insert into undef -> shuffle (src, undef)
+ Args.push_back(C);
+ for (unsigned j = 1; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(I32Ty));
+
+ LHS = EI->getVectorOperand();
+ RHS = V;
+ VIsUndefShuffle = true;
+ } else if (VIsUndefShuffle) {
+ // insert into undefshuffle && size match -> shuffle (v, src)
+ llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(getMaskElt(SVV, j, 0, I32Ty));
+ Args.push_back(llvm::ConstantInt::get(I32Ty,
+ ResElts + C->getZExtValue()));
+ for (unsigned j = CurIdx + 1; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(I32Ty));
+
+ LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+ RHS = EI->getVectorOperand();
+ VIsUndefShuffle = false;
+ }
+ if (!Args.empty()) {
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ V = Builder.CreateShuffleVector(LHS, RHS, Mask);
+ ++CurIdx;
+ continue;
+ }
+ }
+ }
+ Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+ V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+ VIsUndefShuffle = false;
+ ++CurIdx;
+ continue;
+ }
+
+ unsigned InitElts = VVT->getNumElements();
+
+ // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
+ // input is the same width as the vector being constructed, generate an
+ // optimized shuffle of the swizzle input into the result.
+ unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
+ Value *SVOp = SVI->getOperand(0);
+ const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+
+ if (OpTy->getNumElements() == ResElts) {
+ for (unsigned j = 0; j != CurIdx; ++j) {
+ // If the current vector initializer is a shuffle with undef, merge
+ // this shuffle directly into it.
+ if (VIsUndefShuffle) {
+ Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
+ I32Ty));
+ } else {
+ Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ }
+ }
+ for (unsigned j = 0, je = InitElts; j != je; ++j)
+ Args.push_back(getMaskElt(SVI, j, Offset, I32Ty));
+ for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(I32Ty));
+
+ if (VIsUndefShuffle)
+ V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+
+ Init = SVOp;
+ }
+ }
+
+ // Extend init to result vector length, and then shuffle its contribution
+ // to the vector initializer into V.
+ if (Args.empty()) {
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ for (unsigned j = InitElts; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(I32Ty));
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
+ Mask, "vext");
+
+ Args.clear();
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(llvm::ConstantInt::get(I32Ty, j+Offset));
+ for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
+ Args.push_back(llvm::UndefValue::get(I32Ty));
+ }
+
+ // If V is undef, make sure it ends up on the RHS of the shuffle to aid
+ // merging subsequent shuffles into this one.
+ if (CurIdx == 0)
+ std::swap(V, Init);
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+ V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
+ VIsUndefShuffle = isa<llvm::UndefValue>(Init);
+ CurIdx += InitElts;
+ }
+
+  // FIXME: evaluate codegen vs. shuffling against constant null vector.
+  const llvm::Type *EltTy = VType->getElementType();
+
+  // Emit remaining default initializers.
+ for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
+ Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+ llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
+ V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+ }
+ return V;
+}
+
+static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+
+ if (CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase)
+ return false;
+
+ if (isa<CXXThisExpr>(E)) {
+ // We always assume that 'this' is never null.
+ return false;
+ }
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+ // And that lvalue casts are never null.
+ if (ICE->isLvalueCast())
+ return false;
+ }
+
+ return true;
+}
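+
+// Example of the 'this' special case above:
+//
+//   struct B {}; struct D : B { B *base() { return this; } };
+//
+// The D* -> B* conversion of 'this' is emitted without a null check, since
+// 'this' is assumed to be non-null.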
+
+// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
+// have to handle a broader range of conversions than explicit casts, as they
+// handle things like function to pointer-to-function decay, etc.
+Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
+ Expr *E = CE->getSubExpr();
+ QualType DestTy = CE->getType();
+ CastExpr::CastKind Kind = CE->getCastKind();
+
+ if (!DestTy->isVoidType())
+ TestAndClearIgnoreResultAssign();
+
+ // Since almost all cast kinds apply to scalars, this switch doesn't have
+ // a default case, so the compiler will warn on a missing case. The cases
+ // are in the same order as in the CastKind enum.
+ switch (Kind) {
+ case CastExpr::CK_Unknown:
+ // FIXME: All casts should have a known kind!
+ //assert(0 && "Unknown cast kind!");
+ break;
+
+ case CastExpr::CK_AnyPointerToObjCPointerCast:
+ case CastExpr::CK_AnyPointerToBlockPointerCast:
+ case CastExpr::CK_BitCast: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreateBitCast(Src, ConvertType(DestTy));
+ }
+ case CastExpr::CK_NoOp:
+ case CastExpr::CK_UserDefinedConversion:
+ return Visit(const_cast<Expr*>(E));
+
+ case CastExpr::CK_BaseToDerived: {
+ const CXXRecordDecl *DerivedClassDecl =
+ DestTy->getCXXRecordDeclForPointerType();
+
+ return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
+ CE->getBasePath(),
+ ShouldNullCheckClassCastValue(CE));
+ }
+ case CastExpr::CK_UncheckedDerivedToBase:
+ case CastExpr::CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
+ CE->getBasePath(),
+ ShouldNullCheckClassCastValue(CE));
+ }
+ case CastExpr::CK_Dynamic: {
+ Value *V = Visit(const_cast<Expr*>(E));
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
+ return CGF.EmitDynamicCast(V, DCE);
+ }
+ case CastExpr::CK_ToUnion:
+ assert(0 && "Should be unreachable!");
+ break;
+
+ case CastExpr::CK_ArrayToPointerDecay: {
+ assert(E->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+
+ Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays.
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!E->getType()->isVariableArrayType()) {
+ assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
+ assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
+ ->getElementType()) &&
+ "Expected pointer to array");
+ V = Builder.CreateStructGEP(V, 0, "arraydecay");
+ }
+
+ return V;
+ }
+ case CastExpr::CK_FunctionToPointerDecay:
+ return EmitLValue(E).getAddress();
+
+ case CastExpr::CK_NullToMemberPointer:
+ return CGF.CGM.EmitNullConstant(DestTy);
+
+ case CastExpr::CK_BaseToDerivedMemberPointer:
+ case CastExpr::CK_DerivedToBaseMemberPointer: {
+ Value *Src = Visit(E);
+
+ // See if we need to adjust the pointer.
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()->
+ getClass()->getAs<RecordType>()->getDecl());
+ const CXXRecordDecl *DerivedDecl =
+ cast<CXXRecordDecl>(CE->getType()->getAs<MemberPointerType>()->
+ getClass()->getAs<RecordType>()->getDecl());
+ if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
+ std::swap(DerivedDecl, BaseDecl);
+
+ if (llvm::Constant *Adj =
+ CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
+ CE->getBasePath())) {
+ if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
+ Src = Builder.CreateSub(Src, Adj, "adj");
+ else
+ Src = Builder.CreateAdd(Src, Adj, "adj");
+ }
+ return Src;
+ }
+
+ case CastExpr::CK_ConstructorConversion:
+ assert(0 && "Should be unreachable!");
+ break;
+
+ case CastExpr::CK_IntegralToPointer: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ const llvm::Type *MiddleTy =
+ llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ bool InputSigned = E->getType()->isSignedIntegerType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+
+ return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
+ }
+ case CastExpr::CK_PointerToIntegral: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
+ }
+ case CastExpr::CK_ToVoid: {
+ CGF.EmitAnyExpr(E, 0, false, true);
+ return 0;
+ }
+ case CastExpr::CK_VectorSplat: {
+ const llvm::Type *DstTy = ConvertType(DestTy);
+ Value *Elt = Visit(const_cast<Expr*>(E));
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+
+ // Splat the element across to all elements
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ for (unsigned i = 0; i < NumElements; i++)
+ Args.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 0));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+ case CastExpr::CK_IntegralCast:
+ case CastExpr::CK_IntegralToFloating:
+ case CastExpr::CK_FloatingToIntegral:
+ case CastExpr::CK_FloatingCast:
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy);
+
+ case CastExpr::CK_MemberPointerToBoolean:
+ return CGF.EvaluateExprAsBool(E);
+ }
+
+  // Handle cases where the source is a non-complex type.
+
+ if (!CGF.hasAggregateLLVMType(E->getType())) {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // Use EmitScalarConversion to perform the conversion.
+ return EmitScalarConversion(Src, E->getType(), DestTy);
+ }
+
+ if (E->getType()->isAnyComplexType()) {
+ // Handle cases where the source is a complex type.
+ bool IgnoreImag = true;
+ bool IgnoreImagAssign = true;
+ bool IgnoreReal = IgnoreResultAssign;
+ bool IgnoreRealAssign = IgnoreResultAssign;
+ if (DestTy->isBooleanType())
+ IgnoreImagAssign = IgnoreImag = false;
+ else if (DestTy->isVoidType()) {
+ IgnoreReal = IgnoreImag = false;
+ IgnoreRealAssign = IgnoreImagAssign = true;
+ }
+ CodeGenFunction::ComplexPairTy V
+ = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+ IgnoreImagAssign);
+ return EmitComplexToScalarConversion(V, E->getType(), DestTy);
+ }
+
+ // Okay, this is a cast from an aggregate. It must be a cast to void. Just
+ // evaluate the result and return.
+ CGF.EmitAggExpr(E, 0, false, true);
+ return 0;
+}
+
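+// Editorial sketch (not part of the original source): the CK_VectorSplat
+// case above fires when a scalar meets an ext_vector operand; the scalar is
+// inserted into element zero of an undef vector and shuffled with an
+// all-zero mask:
+//
+//   typedef int int4 __attribute__((ext_vector_type(4)));
+//   int4 add_scalar(int4 v, int s) { return v + s; }   // s splat to <4 x i32>
+//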
+Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(),
+ !E->getType()->isVoidType()).getScalarVal();
+}
+
+Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+ llvm::Value *V = CGF.GetAddrOfBlockDecl(E);
+ if (E->getType().isObjCGCWeak())
+ return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, V);
+ return Builder.CreateLoad(V, "tmp");
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Operators
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ if (Op->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFNeg(Op, "neg");
+ return Builder.CreateNeg(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNot(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+ // Compare operand to zero.
+ Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
+
+ // Invert value.
+ // TODO: Could dynamically modify easy computations here. For example, if
+ // the operand is an icmp ne, turn into icmp eq.
+ BoolVal = Builder.CreateNot(BoolVal, "lnot");
+
+ // ZExt result to the expr type.
+ return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
+}
+
+Value *ScalarExprEmitter::VisitOffsetOfExpr(const OffsetOfExpr *E) {
+ Expr::EvalResult Result;
+  if (E->Evaluate(Result, CGF.getContext()))
+ return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+
+ // FIXME: Cannot support code generation for non-constant offsetof.
+ unsigned DiagID = CGF.CGM.getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile non-constant __builtin_offsetof");
+ CGF.CGM.getDiags().Report(CGF.getContext().getFullLoc(E->getLocStart()),
+ DiagID)
+ << E->getSourceRange();
+
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
+/// argument of the sizeof expression as an integer.
+Value *
+ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
+ QualType TypeToSize = E->getTypeOfArgument();
+ if (E->isSizeOf()) {
+ if (const VariableArrayType *VAT =
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ if (E->isArgumentType()) {
+ // sizeof(type) - make sure to emit the VLA size.
+ CGF.EmitVLASize(TypeToSize);
+ } else {
+ // C99 6.5.3.4p2: If the argument is an expression of type
+ // VLA, it is evaluated.
+ CGF.EmitAnyExpr(E->getArgumentExpr());
+ }
+
+ return CGF.GetVLASize(VAT);
+ }
+ }
+
+ // If this isn't sizeof(vla), the result must be constant; use the constant
+ // folding logic so we don't have to duplicate it here.
+ Expr::EvalResult Result;
+ E->Evaluate(Result, CGF.getContext());
+ return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+}
+
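+// Editorial sketch (not part of the original source): sizeof on a VLA is the
+// one case that cannot constant fold; the size computed at the declaration
+// is reloaded instead:
+//
+//   unsigned long vla_bytes(int n) {
+//     int a[n];
+//     return sizeof(a);   // reuses the saved VLA size, no constant folding
+//   }
+//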
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType())
+ return CGF.EmitComplexExpr(Op, false, true, false, true).first;
+ return Visit(Op);
+}
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType())
+ return CGF.EmitComplexExpr(Op, true, false, true, false).second;
+
+ // __imag on a scalar returns zero. Emit the subexpr to ensure side
+ // effects are evaluated, but not the actual value.
+ if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) {
+ Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
+ const llvm::Type* ResultType = ConvertType(E->getType());
+ return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Operators
+//===----------------------------------------------------------------------===//
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ BinOpInfo Result;
+ Result.LHS = Visit(E->getLHS());
+ Result.RHS = Visit(E->getRHS());
+ Result.Ty = E->getType();
+ Result.E = E;
+ return Result;
+}
+
+LValue ScalarExprEmitter::EmitCompoundAssignLValue(
+ const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
+ Value *&BitFieldResult) {
+ QualType LHSTy = E->getLHS()->getType();
+ BitFieldResult = 0;
+ BinOpInfo OpInfo;
+
+ if (E->getComputationResultType()->isAnyComplexType()) {
+ // This needs to go through the complex expression emitter, but it's a tad
+ // complicated to do that... I'm leaving it out for now. (Note that we do
+ // actually need the imaginary part of the RHS for multiplication and
+ // division.)
+ CGF.ErrorUnsupported(E, "complex compound assignment");
+ llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ return LValue();
+ }
+
+ // Emit the RHS first. __block variables need to have the rhs evaluated
+ // first, plus this should improve codegen a little.
+ OpInfo.RHS = Visit(E->getRHS());
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.E = E;
+ // Load/convert the LHS.
+ LValue LHSLV = EmitCheckedLValue(E->getLHS());
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType());
+
+ // Expand the binary operator.
+ Value *Result = (this->*Func)(OpInfo);
+
+ // Convert the result back to the LHS type.
+ Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ // Store the result value into the LHS lvalue. Bit-fields are handled
+ // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after the
+ // assignment...'.
+ if (LHSLV.isBitField()) {
+ if (!LHSLV.isVolatileQualified()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
+ &Result);
+ BitFieldResult = Result;
+ return LHSLV;
+ } else
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
+ } else
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+ return LHSLV;
+}
+
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ Value *BitFieldResult;
+ LValue LHSLV = EmitCompoundAssignLValue(E, Func, BitFieldResult);
+ if (BitFieldResult)
+ return BitFieldResult;
+
+ if (Ignore)
+ return 0;
+ return EmitLoadOfLValue(LHSLV, E->getType());
+}
+
+
+Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ else if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
+ else
+ return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
+}
+
+Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
+ // Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
+ else
+ return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
+}
+
+Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
+ unsigned IID;
+ unsigned OpID = 0;
+
+ switch (Ops.E->getOpcode()) {
+ case BinaryOperator::Add:
+ case BinaryOperator::AddAssign:
+ OpID = 1;
+ IID = llvm::Intrinsic::sadd_with_overflow;
+ break;
+ case BinaryOperator::Sub:
+ case BinaryOperator::SubAssign:
+ OpID = 2;
+ IID = llvm::Intrinsic::ssub_with_overflow;
+ break;
+ case BinaryOperator::Mul:
+ case BinaryOperator::MulAssign:
+ OpID = 3;
+ IID = llvm::Intrinsic::smul_with_overflow;
+ break;
+ default:
+ assert(false && "Unsupported operation for overflow detection");
+ IID = 0;
+ }
+ OpID <<= 1;
+ OpID |= 1;
+
+ const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
+
+ Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
+ Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
+ Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+
+ // Branch in case of overflow.
+ llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *overflowBB =
+ CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::BasicBlock *continueBB =
+ CGF.createBasicBlock("overflow.continue", CGF.CurFn);
+
+ Builder.CreateCondBr(overflow, overflowBB, continueBB);
+
+ // Handle overflow
+
+ Builder.SetInsertPoint(overflowBB);
+
+  // Handler is:
+  //   long long (*__overflow_handler)(long long a, long long b, char op,
+  //                                   char width)
+  std::vector<const llvm::Type*> handlerArgTypes;
+  handlerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+  handlerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+  handlerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
+  handlerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
+  llvm::FunctionType *handlerTy = llvm::FunctionType::get(
+      llvm::Type::getInt64Ty(VMContext), handlerArgTypes, false);
+ llvm::Value *handlerFunction =
+ CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
+ llvm::PointerType::getUnqual(handlerTy));
+ handlerFunction = Builder.CreateLoad(handlerFunction);
+
+ llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
+ Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)),
+ Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)),
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID),
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
+ cast<llvm::IntegerType>(opTy)->getBitWidth()));
+
+ handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+
+ Builder.CreateBr(continueBB);
+
+ // Set up the continuation
+ Builder.SetInsertPoint(continueBB);
+ // Get the correct result
+ llvm::PHINode *phi = Builder.CreatePHI(opTy);
+ phi->reserveOperandSpace(2);
+ phi->addIncoming(result, initialBB);
+ phi->addIncoming(handlerResult, overflowBB);
+
+ return phi;
+}
+
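+// Editorial sketch (not part of the original source): with signed overflow
+// checking enabled (-ftrapv in this era of clang), an ordinary signed
+// multiply lowers to the with-overflow intrinsic plus a conditional call to
+// the user-supplied __overflow_handler above:
+//
+//   int checked_mul(int a, int b) { return a * b; }
+//   // => call llvm.smul.with.overflow.i32, branch on the overflow bit,
+//   //    then a phi of the intrinsic result and the handler's result
+//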
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
+ if (!Ops.Ty->isAnyPointerType()) {
+ if (CGF.getContext().getLangOptions().OverflowChecking &&
+ Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
+
+ // Signed integer overflow is undefined behavior.
+ if (Ops.Ty->isSignedIntegerType())
+ return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
+
+ return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
+ }
+
+ if (Ops.Ty->isPointerType() &&
+ Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
+ // The amount of the addition needs to account for the VLA size
+ CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
+ }
+ Value *Ptr, *Idx;
+ Expr *IdxExp;
+ const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>();
+ const ObjCObjectPointerType *OPT =
+ Ops.E->getLHS()->getType()->getAs<ObjCObjectPointerType>();
+ if (PT || OPT) {
+ Ptr = Ops.LHS;
+ Idx = Ops.RHS;
+ IdxExp = Ops.E->getRHS();
+ } else { // int + pointer
+ PT = Ops.E->getRHS()->getType()->getAs<PointerType>();
+ OPT = Ops.E->getRHS()->getType()->getAs<ObjCObjectPointerType>();
+ assert((PT || OPT) && "Invalid add expr");
+ Ptr = Ops.RHS;
+ Idx = Ops.LHS;
+ IdxExp = Ops.E->getLHS();
+ }
+
+ unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (Width < CGF.LLVMPointerWidth) {
+ // Zero or sign extend the pointer value based on whether the index is
+ // signed or not.
+ const llvm::Type *IdxType =
+ llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ if (IdxExp->getType()->isSignedIntegerType())
+ Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+ else
+ Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+ }
+ const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
+ // Handle interface types, which are not represented with a concrete type.
+ if (const ObjCObjectType *OIT = ElementType->getAs<ObjCObjectType>()) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+ Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ptr->getType());
+ }
+
+  // Explicitly handle GNU void* and function pointer arithmetic extensions.
+  // The GNU void* casts amount to no-ops since our void* type is i8*, but
+  // this is future-proof.
+ if (ElementType->isVoidType() || ElementType->isFunctionType()) {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+ Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ptr->getType());
+ }
+
+ return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
+}
+
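+// Editorial sketch (not part of the original source): for pointer + int the
+// index is first widened to the pointer width using the signedness of its C
+// type, then an inbounds GEP scales by the element size; roughly:
+//
+//   double *advance(double *p, int i) { return p + i; }
+//   // => %idx.ext = sext i32 %i to i64
+//   //    %add.ptr = getelementptr inbounds double* %p, i64 %idx.ext
+//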
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
+ if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
+ if (CGF.getContext().getLangOptions().OverflowChecking
+ && Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
+
+ // Signed integer overflow is undefined behavior.
+ if (Ops.Ty->isSignedIntegerType())
+ return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
+
+ return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ }
+
+ if (Ops.E->getLHS()->getType()->isPointerType() &&
+ Ops.E->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
+    // The amount of the addition needs to account for the VLA size for
+    // ptr-int, and the amount of the division needs to account for the VLA
+    // size for ptr-ptr.
+ CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
+ }
+
+ const QualType LHSType = Ops.E->getLHS()->getType();
+ const QualType LHSElementType = LHSType->getPointeeType();
+ if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
+ // pointer - int
+ Value *Idx = Ops.RHS;
+ unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (Width < CGF.LLVMPointerWidth) {
+ // Zero or sign extend the pointer value based on whether the index is
+ // signed or not.
+ const llvm::Type *IdxType =
+ llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ if (Ops.E->getRHS()->getType()->isSignedIntegerType())
+ Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+ else
+ Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+ }
+ Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
+
+ // Handle interface types, which are not represented with a concrete type.
+ if (const ObjCObjectType *OIT = LHSElementType->getAs<ObjCObjectType>()) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ CGF.getContext().
+ getTypeSizeInChars(OIT).getQuantity());
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
+ return Builder.CreateBitCast(Res, Ops.LHS->getType());
+ }
+
+    // Explicitly handle GNU void* and function pointer arithmetic
+    // extensions. The GNU void* casts amount to no-ops since our void* type
+    // is i8*, but this is future-proof.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+ Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
+ return Builder.CreateBitCast(Res, Ops.LHS->getType());
+ }
+
+ return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
+ } else {
+ // pointer - pointer
+ Value *LHS = Ops.LHS;
+ Value *RHS = Ops.RHS;
+
+ CharUnits ElementSize;
+
+ // Handle GCC extension for pointer arithmetic on void* and function pointer
+ // types.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+ ElementSize = CharUnits::One();
+ } else {
+ ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
+ }
+
+ const llvm::Type *ResultType = ConvertType(Ops.Ty);
+ LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+ // Optimize out the shift for element size of 1.
+ if (ElementSize.isOne())
+ return BytesBetween;
+
+ // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
+ // pointer difference in C is only defined in the case where both operands
+ // are pointing to elements of an array.
+ Value *BytesPerElt =
+ llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
+ return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+ }
+}
+
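+// Editorial sketch (not part of the original source): pointer - pointer
+// converts both sides to integers, subtracts, and divides by the element
+// size; the exact sdiv is justified because C only defines the result for
+// pointers into the same array; roughly:
+//
+//   long dist(int *a, int *b) { return a - b; }
+//   // => ptrtoint both operands, %sub.ptr.sub = sub i64 ...,
+//   //    then sdiv exact i64 %sub.ptr.sub, 4
+//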
+Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ if (CGF.CatchUndefined
+ && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
+ llvm::ConstantInt::get(RHS->getType(), Width)),
+ Cont, CGF.getTrapBB());
+ CGF.EmitBlock(Cont);
+ }
+
+ return Builder.CreateShl(Ops.LHS, RHS, "shl");
+}
+
+Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ if (CGF.CatchUndefined
+ && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
+ llvm::ConstantInt::get(RHS->getType(), Width)),
+ Cont, CGF.getTrapBB());
+ CGF.EmitBlock(Cont);
+ }
+
+ if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateLShr(Ops.LHS, RHS, "shr");
+ return Builder.CreateAShr(Ops.LHS, RHS, "shr");
+}
+
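+// Editorial sketch (not part of the original source): when CatchUndefined is
+// set (-fcatch-undefined-behavior), both shift emitters guard the shift
+// amount against the LHS bit width before the operation:
+//
+//   int shr(int x, int s) { return x >> s; }
+//   // => icmp ult i32 %s, 32; in range falls through to ashr,
+//   //    out of range branches to the trap block
+//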
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc) {
+ TestAndClearIgnoreResultAssign();
+ Value *Result;
+ QualType LHSTy = E->getLHS()->getType();
+ if (LHSTy->isMemberFunctionPointerType()) {
+ Value *LHSPtr = CGF.EmitAnyExprToTemp(E->getLHS()).getAggregateAddr();
+ Value *RHSPtr = CGF.EmitAnyExprToTemp(E->getRHS()).getAggregateAddr();
+ llvm::Value *LHSFunc = Builder.CreateStructGEP(LHSPtr, 0);
+ LHSFunc = Builder.CreateLoad(LHSFunc);
+ llvm::Value *RHSFunc = Builder.CreateStructGEP(RHSPtr, 0);
+ RHSFunc = Builder.CreateLoad(RHSFunc);
+ Value *ResultF = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHSFunc, RHSFunc, "cmp.func");
+ Value *NullPtr = llvm::Constant::getNullValue(LHSFunc->getType());
+ Value *ResultNull = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHSFunc, NullPtr, "cmp.null");
+ llvm::Value *LHSAdj = Builder.CreateStructGEP(LHSPtr, 1);
+ LHSAdj = Builder.CreateLoad(LHSAdj);
+ llvm::Value *RHSAdj = Builder.CreateStructGEP(RHSPtr, 1);
+ RHSAdj = Builder.CreateLoad(RHSAdj);
+ Value *ResultA = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHSAdj, RHSAdj, "cmp.adj");
+ if (E->getOpcode() == BinaryOperator::EQ) {
+ Result = Builder.CreateOr(ResultNull, ResultA, "or.na");
+ Result = Builder.CreateAnd(Result, ResultF, "and.f");
+ } else {
+ assert(E->getOpcode() == BinaryOperator::NE &&
+ "Member pointer comparison other than == or != ?");
+ Result = Builder.CreateAnd(ResultNull, ResultA, "and.na");
+ Result = Builder.CreateOr(Result, ResultF, "or.f");
+ }
+ } else if (!LHSTy->isAnyComplexType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+ if (LHS->getType()->isFPOrFPVectorTy()) {
+ Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+ LHS, RHS, "cmp");
+ } else if (LHSTy->isSignedIntegerType()) {
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
+ LHS, RHS, "cmp");
+ } else {
+ // Unsigned integers and pointers.
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS, RHS, "cmp");
+ }
+
+ // If this is a vector comparison, sign extend the result to the appropriate
+ // vector integer type and return it (don't convert to bool).
+ if (LHSTy->isVectorType())
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+
+ } else {
+ // Complex Comparison: can only be an equality comparison.
+ CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
+ CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
+
+ QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+
+ Value *ResultR, *ResultI;
+ if (CETy->isRealFloatingType()) {
+ ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ } else {
+ // Complex comparisons can only be equality comparisons. As such, signed
+ // and unsigned opcodes are the same.
+ ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ }
+
+ if (E->getOpcode() == BinaryOperator::EQ) {
+ Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
+ } else {
+ assert(E->getOpcode() == BinaryOperator::NE &&
+ "Complex comparison other than == or != ?");
+ Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
+ }
+ }
+
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen just a little.
+ Value *RHS = Visit(E->getRHS());
+ LValue LHS = EmitCheckedLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitField()) {
+ if (!LHS.isVolatileQualified()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
+ &RHS);
+ return RHS;
+ } else
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
+ } else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+ if (Ignore)
+ return 0;
+ return EmitLoadOfLValue(LHS, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+ const llvm::Type *ResTy = ConvertType(E->getType());
+
+ // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
+ // If we have 1 && X, just emit X without inserting the control flow.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+ if (Cond == 1) { // If we have 1 && X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
+ }
+
+ // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::Constant::getNullValue(ResTy);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+
+ // Branch on the LHS first. If it is false, go to the failure (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
+
+  // Any edges into the ContBlock now come from the (indeterminate number of)
+  // short-circuit edges out of this first condition; all of their values will
+  // be false. Start setting up the PHI node in the ContBlock for them.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+ "", ContBlock);
+ PN->reserveOperandSpace(2); // Normal case, two inputs.
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ CGF.EndConditionalBranch();
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
+}
+
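+// Editorial sketch (not part of the original source): when the LHS does not
+// constant fold, '&&' becomes control flow feeding an i1 PHI that is false
+// on every short-circuit edge and carries the RHS's boolean value otherwise:
+//
+//   int both(int a, int b) { return a && b; }
+//   // => branch on a; land.end: phi i1 [ false, %entry ], [ %rhs, %land.rhs ],
+//   //    then zext the phi to i32
+//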
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+ const llvm::Type *ResTy = ConvertType(E->getType());
+
+ // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
+ // If we have 0 || X, just emit X without inserting the control flow.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+ if (Cond == -1) { // If we have 0 || X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
+ }
+
+ // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::ConstantInt::get(ResTy, 1);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+
+ // Branch on the LHS first. If it is true, go to the success (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
+
+  // Any edges into the ContBlock now come from the (indeterminate number of)
+  // short-circuit edges out of this first condition; all of their values will
+  // be true. Start setting up the PHI node in the ContBlock for them.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+ "", ContBlock);
+ PN->reserveOperandSpace(2); // Normal case, two inputs.
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
+
+ CGF.BeginConditionalBranch();
+
+ // Emit the RHS condition as a bool value.
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ CGF.EndConditionalBranch();
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitStmt(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Operators
+//===----------------------------------------------------------------------===//
+
+/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
+/// expression is cheap enough and side-effect-free enough to evaluate
+/// unconditionally instead of conditionally. This is used to convert control
+/// flow into selects in some cases.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
+ CodeGenFunction &CGF) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr(), CGF);
+
+ // TODO: Allow anything we can constant fold to an integer or fp constant.
+ if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
+ isa<FloatingLiteral>(E))
+ return true;
+
+ // Non-volatile automatic variables too, to get "cond ? X : Y" where
+ // X and Y are local variables.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasLocalStorage() && !(CGF.getContext()
+ .getCanonicalType(VD->getType())
+ .isVolatileQualified()))
+ return true;
+
+ return false;
+}
+
+
+Value *ScalarExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
+ Expr *Live = E->getLHS(), *Dead = E->getRHS();
+ if (Cond == -1)
+ std::swap(Live, Dead);
+
+ // If the dead side doesn't have labels we need, and if the Live side isn't
+    // the GNU missing ?: extension (which we could handle, but don't bother
+ // to), just emit the Live part.
+ if ((!Dead || !CGF.ContainsLabel(Dead)) && // No labels in dead part
+ Live) // Live part isn't missing.
+ return Visit(Live);
+ }
+
+
+ // If this is a really simple expression (like x ? 4 : 5), emit this as a
+ // select instead of as control flow. We can only do this if it is cheap and
+ // safe to evaluate the LHS and RHS unconditionally.
+ if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS(),
+ CGF) &&
+ isCheapEnoughToEvaluateUnconditionally(E->getRHS(), CGF)) {
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
+ llvm::Value *LHS = Visit(E->getLHS());
+ llvm::Value *RHS = Visit(E->getRHS());
+ return Builder.CreateSelect(CondV, LHS, RHS, "cond");
+ }
+
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+ Value *CondVal = 0;
+
+ // If we don't have the GNU missing condition extension, emit a branch on bool
+ // the normal way.
+ if (E->getLHS()) {
+    // Just use EmitBranchOnBoolExpr to get small and simple code for the
+    // branch on bool.
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+ } else {
+ // Otherwise, for the ?: extension, evaluate the conditional and then
+ // convert it to bool the hard way. We do this explicitly because we need
+ // the unconverted value for the missing middle value of the ?:.
+ CondVal = CGF.EmitScalarExpr(E->getCond());
+
+ // In some cases, EmitScalarConversion will delete the "CondVal" expression
+ // if there are no extra uses (an optimization). Inhibit this by making an
+ // extra dead use, because we're going to add a use of CondVal later. We
+ // don't use the builder for this, because we don't want it to get optimized
+ // away. This leaves dead code, but the ?: extension isn't common.
+ new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
+ Builder.GetInsertBlock());
+
+ Value *CondBoolVal =
+ CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
+ CGF.getContext().BoolTy);
+ Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
+ }
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(LHSBlock);
+
+ // Handle the GNU extension for missing LHS.
+ Value *LHS;
+ if (E->getLHS())
+ LHS = Visit(E->getLHS());
+ else // Perform promotions, to handle cases like "short ?: int"
+ LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+
+ CGF.EndConditionalBranch();
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.BeginConditionalBranch();
+ CGF.EmitBlock(RHSBlock);
+
+ Value *RHS = Visit(E->getRHS());
+ CGF.EndConditionalBranch();
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+
+ // If the LHS or RHS is a throw expression, it will be legitimately null.
+ if (!LHS)
+ return RHS;
+ if (!RHS)
+ return LHS;
+
+  // Create a PHI node for the result.
+ llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
+ PN->reserveOperandSpace(2);
+ PN->addIncoming(LHS, LHSBlock);
+ PN->addIncoming(RHS, RHSBlock);
+ return PN;
+}
+
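+// Editorial sketch (not part of the original source): when both arms are
+// cheap, side-effect-free scalars per the predicate above (literals or
+// non-volatile locals), ?: emits a select instead of a diamond of blocks:
+//
+//   int pick(int c, int x, int y) { return c ? x : y; }
+//   // => %cond = select i1 %tobool, i32 %x, i32 %y
+//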
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ // If EmitVAArg fails, we fall back to the LLVM instruction.
+ if (!ArgPtr)
+ return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
+
+  // FIXME: Volatility.
+ return Builder.CreateLoad(ArgPtr);
+}
+
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
+ return CGF.BuildBlockLiteralTmp(BE);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitScalarExpr - Emit the computation of the specified expression of scalar
+/// type, ignoring the result.
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
+ assert(E && !hasAggregateLLVMType(E->getType()) &&
+ "Invalid scalar expression to emit");
+
+ return ScalarExprEmitter(*this, IgnoreResultAssign)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+ QualType DstTy) {
+ assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
+ "Invalid scalar expression to emit");
+ return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+ QualType SrcTy,
+ QualType DstTy) {
+ assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
+ "Invalid complex -> scalar conversion");
+ return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
+ DstTy);
+}
+
+LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
+ llvm::Value *V;
+ // object->isa or (*object).isa
+ // Generate code as for: *(Class*)object
+ // build Class* type
+ const llvm::Type *ClassPtrTy = ConvertType(E->getType());
+
+ Expr *BaseExpr = E->getBase();
+ if (BaseExpr->isLvalue(getContext()) != Expr::LV_Valid) {
+ V = CreateTempAlloca(ClassPtrTy, "resval");
+ llvm::Value *Src = EmitScalarExpr(BaseExpr);
+ Builder.CreateStore(Src, V);
+ LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(LV, E->getType());
+ }
+ else {
+ if (E->isArrow())
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
+ else
+ V = EmitLValue(BaseExpr).getAddress();
+ }
+
+ // build Class* type
+ ClassPtrTy = ClassPtrTy->getPointerTo();
+ V = Builder.CreateBitCast(V, ClassPtrTy);
+ LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+ return LV;
+}
+
+
+LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
+ const CompoundAssignOperator *E) {
+ ScalarExprEmitter Scalar(*this);
+ Value *BitFieldResult = 0;
+ switch (E->getOpcode()) {
+#define COMPOUND_OP(Op) \
+ case BinaryOperator::Op##Assign: \
+ return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
+ BitFieldResult)
+ COMPOUND_OP(Mul);
+ COMPOUND_OP(Div);
+ COMPOUND_OP(Rem);
+ COMPOUND_OP(Add);
+ COMPOUND_OP(Sub);
+ COMPOUND_OP(Shl);
+ COMPOUND_OP(Shr);
+ COMPOUND_OP(And);
+ COMPOUND_OP(Xor);
+ COMPOUND_OP(Or);
+#undef COMPOUND_OP
+
+ case BinaryOperator::PtrMemD:
+ case BinaryOperator::PtrMemI:
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::Rem:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ case BinaryOperator::And:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Or:
+ case BinaryOperator::LAnd:
+ case BinaryOperator::LOr:
+ case BinaryOperator::Assign:
+ case BinaryOperator::Comma:
+ assert(false && "Not valid compound assignment operators");
+ break;
+ }
+
+ llvm_unreachable("Unhandled compound assignment operator");
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
new file mode 100644
index 0000000..7c842a9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -0,0 +1,842 @@
+//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Objective-C code as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Emits an instance of NSConstantString representing the object.
+llvm::Value *
+CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E) {
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(E->getString());
+ // FIXME: This bitcast should just be made an invariant on the Runtime.
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+}
+
+/// Emit a selector.
+llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
+ // Untyped selector.
+ // Note that this implementation allows for non-constant strings to be passed
+ // as arguments to @selector(). Currently, the only thing preventing this
+ // behaviour is the type checking in the front end.
+ return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
+ // FIXME: This should pass the Decl not the name.
+ return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
+}
+
+
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return) {
+ // Only the lookup mechanism and first two arguments of the method
+ // implementation vary between runtimes. We can get the receiver and
+ // arguments in generic code.
+
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ bool isSuperMessage = false;
+ bool isClassMessage = false;
+ ObjCInterfaceDecl *OID = 0;
+ // Find the receiver
+ llvm::Value *Receiver = 0;
+ switch (E->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class: {
+ const ObjCObjectType *ObjTy
+ = E->getClassReceiver()->getAs<ObjCObjectType>();
+ assert(ObjTy && "Invalid Objective-C class message send");
+ OID = ObjTy->getInterface();
+ assert(OID && "Invalid Objective-C class message send");
+ Receiver = Runtime.GetClass(Builder, OID);
+ isClassMessage = true;
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ isClassMessage = true;
+ break;
+ }
+
+ CallArgList Args;
+ EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
+
+ if (isSuperMessage) {
+ // super is only valid in an Objective-C method
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ return Runtime.GenerateMessageSendSuper(*this, Return, E->getType(),
+ E->getSelector(),
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args,
+ E->getMethodDecl());
+ }
+
+ return Runtime.GenerateMessageSend(*this, Return, E->getType(),
+ E->getSelector(),
+ Receiver, Args, OID,
+ E->getMethodDecl());
+}
+
+/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
+/// the LLVM function and sets the other context used by
+/// CodeGenFunction.
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ FunctionArgList Args;
+ // Check if we should generate debug info for this method.
+ if (CGM.getDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getDebugInfo();
+
+ llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
+
+ const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
+ CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
+
+ Args.push_back(std::make_pair(OMD->getSelfDecl(),
+ OMD->getSelfDecl()->getType()));
+ Args.push_back(std::make_pair(OMD->getCmdDecl(),
+ OMD->getCmdDecl()->getType()));
+
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI)
+ Args.push_back(std::make_pair(*PI, (*PI)->getType()));
+
+ StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocStart());
+}
+
+/// Generate an Objective-C method. An Objective-C method is a C function with
+/// its pointer, name, and types registered in the class structure.
+void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
+ StartObjCMethod(OMD, OMD->getClassInterface());
+ EmitStmt(OMD->getBody());
+ FinishFunction(OMD->getBodyRBrace());
+}
+
+// FIXME: I wasn't sure about the synthesis approach. If we end up generating an
+// AST for the whole body we can just fall back to having a GenerateFunction
+// which takes the body Stmt.
+
+/// GenerateObjCGetter - Generate an Objective-C property getter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ bool IsAtomic =
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
+ ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
+ assert(OMD && "Invalid call to generate getter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ // Determine if we should use an objc_getProperty call for
+ // this. Non-atomic properties are directly evaluated.
+ // atomic 'copy' and 'retain' properties are also directly
+ // evaluated in gc-only mode.
+ if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+ IsAtomic &&
+ (PD->getSetterKind() == ObjCPropertyDecl::Copy ||
+ PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+ llvm::Value *GetPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
+
+ if (!GetPropertyFn) {
+ CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
+ FinishFunction();
+ return;
+ }
+
+ // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ CodeGenTypes &Types = CGM.getTypes();
+ ValueDecl *Cmd = OMD->getCmdDecl();
+ llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+ QualType IdTy = getContext().getObjCIdType();
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+ Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ // FIXME: We shouldn't need to get the function info here, the
+ // runtime already should have computed it to build the function.
+ RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args,
+ FunctionType::ExtInfo()),
+ GetPropertyFn, ReturnValueSlot(), Args);
+ // We need to fix the type here. Ivars with copy & retain are
+ // always objects so we don't need to worry about complex or
+ // aggregates.
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
+ } else {
+ if (Ivar->getType()->isAnyComplexType()) {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ ComplexPairTy Pair = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+ StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified());
+ }
+ else if (hasAggregateLLVMType(Ivar->getType())) {
+ bool IsStrong = false;
+ if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(Ivar->getType())))
+ && CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect
+ && CGM.getObjCRuntime().GetCopyStructFunction()) {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetCopyStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList Args;
+ RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *isAtomic =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsAtomic ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(isAtomic),
+ getContext().BoolTy));
+ llvm::Value *hasStrong =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsStrong ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(hasStrong),
+ getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+ }
+ else {
+ if (PID->getGetterCXXConstructor()) {
+ ReturnStmt *Stmt =
+ new (getContext()) ReturnStmt(SourceLocation(),
+ PID->getGetterCXXConstructor(),
+ 0);
+ EmitReturnStmt(*Stmt);
+ }
+ else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ }
+ }
+ } else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ CodeGenTypes &Types = CGM.getTypes();
+ RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
+ }
+ }
+
+ FinishFunction();
+}
+
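+// Editorial sketch (not part of the original source): an atomic 'copy' or
+// 'retain' property outside GC-only mode takes the objc_getProperty path in
+// GenerateObjCGetter above; the synthesized getter body is roughly:
+//
+//   @property (copy) NSString *name;   // atomic by default
+//   // @synthesize name =>
+//   //   return (NSString *)objc_getProperty(self, _cmd, <ivar offset>, YES);
+//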
+/// GenerateObjCSetter - Generate an Objective-C property setter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
+ assert(OMD && "Invalid call to generate setter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
+ bool IsAtomic =
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
+
+ // Determine if we should use an objc_setProperty call for
+ // this. Properties with 'copy' semantics always use it, as do
+ // non-atomic properties with 'release' semantics as long as we are
+ // not in gc-only mode.
+ if (IsCopy ||
+ (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+ PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+ llvm::Value *SetPropertyFn =
+ CGM.getObjCRuntime().GetPropertySetFunction();
+
+ if (!SetPropertyFn) {
+ CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
+ FinishFunction();
+ return;
+ }
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
+ // <is-atomic>, <is-copy>).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ CodeGenTypes &Types = CGM.getTypes();
+ ValueDecl *Cmd = OMD->getCmdDecl();
+ llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+ QualType IdTy = getContext().getObjCIdType();
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsId =
+ Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
+ Types.ConvertType(IdTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+ Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+ Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
+ getContext().BoolTy));
+ Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
+ getContext().BoolTy));
+ // FIXME: We shouldn't need to get the function info here; the runtime
+ // should already have computed it to build the function.
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ SetPropertyFn,
+ ReturnValueSlot(), Args);
+ } else if (IsAtomic && hasAggregateLLVMType(Ivar->getType()) &&
+ !Ivar->getType()->isAnyComplexType() &&
+ IndirectObjCSetterArg(*CurFnInfo) &&
+ CGM.getObjCRuntime().GetCopyStructFunction()) {
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
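+ // For reference (assumed standard declaration, not from this file):
+ //   void objc_copyStruct(void *dest, const void *src, ptrdiff_t size,
+ //                        BOOL atomic, BOOL hasStrong);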
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetCopyStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ CallArgList Args;
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
+ RValue RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsPtrTy =
+ Builder.CreateBitCast(Arg,
+ Types.ConvertType(getContext().VoidPtrTy));
+ RV = RValue::get(ArgAsPtrTy);
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ Args.push_back(std::make_pair(RValue::get(False), getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+ } else if (PID->getSetterCXXAssignment()) {
+ EmitAnyExpr(PID->getSetterCXXAssignment(), (llvm::Value *)0, false, true,
+ false);
+ } else {
+ // FIXME: Find a clean way to avoid AST node creation.
+ SourceLocation Loc = PD->getLocation();
+ ValueDecl *Self = OMD->getSelfDecl();
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ DeclRefExpr Base(Self, Self->getType(), Loc);
+ ParmVarDecl *ArgDecl = *OMD->param_begin();
+ DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc);
+ ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
+
+ // The property type can differ from the ivar type in some situations with
+ // Objective-C pointer types; in these cases we can always bitcast the RHS.
+ if (getContext().getCanonicalType(Ivar->getType()) !=
+ getContext().getCanonicalType(ArgDecl->getType())) {
+ ImplicitCastExpr ArgCasted(Ivar->getType(), CastExpr::CK_BitCast, &Arg,
+ CXXBaseSpecifierArray(), false);
+ BinaryOperator Assign(&IvarRef, &ArgCasted, BinaryOperator::Assign,
+ Ivar->getType(), Loc);
+ EmitStmt(&Assign);
+ } else {
+ BinaryOperator Assign(&IvarRef, &Arg, BinaryOperator::Assign,
+ Ivar->getType(), Loc);
+ EmitStmt(&Assign);
+ }
+ }
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD,
+ bool ctor) {
+ llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> IvarInitializers;
+ MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
+ StartObjCMethod(MD, IMP->getClassInterface());
+ for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
+ E = IMP->init_end(); B != E; ++B) {
+ CXXBaseOrMemberInitializer *Member = (*B);
+ IvarInitializers.push_back(Member);
+ }
+ if (ctor) {
+ for (unsigned I = 0, E = IvarInitializers.size(); I != E; ++I) {
+ CXXBaseOrMemberInitializer *IvarInit = IvarInitializers[I];
+ FieldDecl *Field = IvarInit->getMember();
+ QualType FieldType = Field->getType();
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ EmitAggExpr(IvarInit->getInit(), LV.getAddress(),
+ LV.isVolatileQualified(), false, true);
+ }
+ // constructor returns 'self'.
+ CodeGenTypes &Types = CGM.getTypes();
+ QualType IdTy(CGM.getContext().getObjCIdType());
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
+ } else {
+ // dtor
+ for (size_t i = IvarInitializers.size(); i > 0; --i) {
+ FieldDecl *Field = IvarInitializers[i - 1]->getMember();
+ QualType FieldType = Field->getType();
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor(getContext());
+ if (!Dtor->isTrivial()) {
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(LV.getAddress(), BasePtr);
+ EmitCXXAggrDestructorCall(Dtor,
+ Array, BaseAddrPtr);
+ } else {
+ EmitCXXDestructorCall(Dtor,
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ LV.getAddress());
+ }
+ }
+ }
+ }
+ FinishFunction();
+}
+
+bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
+ ++it; ++it; // Skip the self and _cmd arguments; the third is the new value.
+ const ABIArgInfo &AI = it->info;
+ // FIXME: Is this check sufficient?
+ return (AI.getKind() == ABIArgInfo::Indirect);
+}
+
+bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return false;
+ if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
+ return FDTTy->getDecl()->hasObjectMember();
+ return false;
+}
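+
+// Illustration (hypothetical type, not from this file): with GC enabled this
+// returns true for an ivar declared as
+//   struct Pair { id object; int key; } pair;
+// because the record type has an Objective-C object member.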
+
+llvm::Value *CodeGenFunction::LoadObjCSelf() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
+}
+
+QualType CodeGenFunction::TypeOfSelfObject() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
+ const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
+ getContext().getCanonicalType(selfDecl->getType()));
+ return PTy->getPointeeType();
+}
+
+RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
+ const Selector &S,
+ ReturnValueSlot Return) {
+ llvm::Value *Receiver = LoadObjCSelf();
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isClassMessage = OMD->isClassMethod();
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ Return,
+ Exp->getType(),
+ S,
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ CallArgList());
+}
+
+RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp,
+ ReturnValueSlot Return) {
+ Exp = Exp->IgnoreParens();
+ // FIXME: Split it into two separate routines.
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+ Selector S = E->getProperty()->getGetterName();
+ if (isa<ObjCSuperExpr>(E->getBase()))
+ return EmitObjCSuperPropertyGet(E, S, Return);
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, Exp->getType(), S,
+ EmitScalarExpr(E->getBase()),
+ CallArgList());
+ } else {
+ const ObjCImplicitSetterGetterRefExpr *KE =
+ cast<ObjCImplicitSetterGetterRefExpr>(Exp);
+ Selector S = KE->getGetterMethod()->getSelector();
+ llvm::Value *Receiver;
+ if (KE->getInterfaceDecl()) {
+ const ObjCInterfaceDecl *OID = KE->getInterfaceDecl();
+ Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+ } else if (isa<ObjCSuperExpr>(KE->getBase()))
+ return EmitObjCSuperPropertyGet(KE, S, Return);
+ else
+ Receiver = EmitScalarExpr(KE->getBase());
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, Exp->getType(), S,
+ Receiver,
+ CallArgList(), KE->getInterfaceDecl());
+ }
+}
+
+void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
+ const Selector &S,
+ RValue Src) {
+ CallArgList Args;
+ llvm::Value *Receiver = LoadObjCSelf();
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isClassMessage = OMD->isClassMethod();
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ Args.push_back(std::make_pair(Src, Exp->getType()));
+ CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ ReturnValueSlot(),
+ Exp->getType(),
+ S,
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args);
+}
+
+void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
+ RValue Src) {
+ // FIXME: Split it into two separate routines.
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+ Selector S = E->getProperty()->getSetterName();
+ if (isa<ObjCSuperExpr>(E->getBase())) {
+ EmitObjCSuperPropertySet(E, S, Src);
+ return;
+ }
+ CallArgList Args;
+ Args.push_back(std::make_pair(Src, E->getType()));
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, S,
+ EmitScalarExpr(E->getBase()),
+ Args);
+ } else if (const ObjCImplicitSetterGetterRefExpr *E =
+ dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
+ Selector S = E->getSetterMethod()->getSelector();
+ CallArgList Args;
+ llvm::Value *Receiver;
+ if (E->getInterfaceDecl()) {
+ const ObjCInterfaceDecl *OID = E->getInterfaceDecl();
+ Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+ } else if (isa<ObjCSuperExpr>(E->getBase())) {
+ EmitObjCSuperPropertySet(E, S, Src);
+ return;
+ } else
+ Receiver = EmitScalarExpr(E->getBase());
+ Args.push_back(std::make_pair(Src, E->getType()));
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, S,
+ Receiver,
+ Args, E->getInterfaceDecl());
+ } else
+ assert(0 && "bad expression node in EmitObjCPropertySet");
+}
+
+void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
+ llvm::Constant *EnumerationMutationFn =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
+ llvm::Value *DeclAddress;
+ QualType ElementTy;
+
+ if (!EnumerationMutationFn) {
+ CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
+ return;
+ }
+
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ EmitStmt(SD);
+ assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
+ const Decl* D = SD->getSingleDecl();
+ ElementTy = cast<ValueDecl>(D)->getType();
+ DeclAddress = LocalDeclMap[D];
+ } else {
+ ElementTy = cast<Expr>(S.getElement())->getType();
+ DeclAddress = 0;
+ }
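+
+ // Shape of the emitted loop, sketched as pseudo-code (labels match the
+ // basic blocks created below; the mutation check runs every iteration):
+ //   count = [collection countByEnumeratingWithState:&state
+ //                                            objects:items count:16];
+ //   if (count == 0) goto noelements;            // else: setstartmutations
+ //   startMutations = *state.mutationsPtr;
+ //   loopstart:  counter = 0;
+ //   loopbody:   if (*state.mutationsPtr != startMutations)
+ //                 <call the runtime's mutation handler>;  // wasmutated
+ //               element = state.itemsPtr[counter]; <body>; ++counter;
+ //   afterbody:  if (counter < count) goto loopbody;
+ //   fetchmore:  count = [collection countByEnumeratingWithState:...];
+ //               if (count != 0) goto loopstart;
+ //   noelements: if the element is an lvalue expression, store nil into it;
+ //   loopend: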
+
+ // Fast enumeration state.
+ QualType StateTy = getContext().getObjCFastEnumerationStateType();
+ llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
+ EmitNullInitialization(StatePtr, StateTy);
+
+ // Number of elements in the items array.
+ static const unsigned NumItems = 16;
+
+ // Get selector
+ IdentifierInfo *II[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ Selector FastEnumSel =
+ CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
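+ // i.e. the NSFastEnumeration protocol message:
+ //   - (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state
+ //                                     objects:(id *)stackbuf
+ //                                       count:(NSUInteger)len;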
+
+ QualType ItemsTy =
+ getContext().getConstantArrayType(getContext().getObjCIdType(),
+ llvm::APInt(32, NumItems),
+ ArrayType::Normal, 0);
+ llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+
+ llvm::Value *Collection = EmitScalarExpr(S.getCollection());
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(StatePtr),
+ getContext().getPointerType(StateTy)));
+
+ Args.push_back(std::make_pair(RValue::get(ItemsPtr),
+ getContext().getPointerType(ItemsTy)));
+
+ const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
+ Args.push_back(std::make_pair(RValue::get(Count),
+ getContext().UnsignedLongTy));
+
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+
+ llvm::Value *LimitPtr = CreateMemTemp(getContext().UnsignedLongTy,
+ "limit.ptr");
+ Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+
+ llvm::BasicBlock *NoElements = createBasicBlock("noelements");
+ llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
+
+ llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
+ llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+
+ llvm::Value *IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+ Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
+
+ EmitBlock(SetStartMutations);
+
+ llvm::Value *StartMutationsPtr = CreateMemTemp(getContext().UnsignedLongTy);
+
+ llvm::Value *StateMutationsPtrPtr =
+ Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+ "mutationsptr");
+
+ llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
+ "mutations");
+
+ Builder.CreateStore(StateMutations, StartMutationsPtr);
+
+ llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
+ EmitBlock(LoopStart);
+
+ llvm::Value *CounterPtr = CreateMemTemp(getContext().UnsignedLongTy,
+ "counter.ptr");
+ Builder.CreateStore(Zero, CounterPtr);
+
+ llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
+ EmitBlock(LoopBody);
+
+ StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+ StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+
+ llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
+ "mutations");
+ llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
+ StartMutations,
+ "tobool");
+
+ llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
+ llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
+
+ Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
+
+ EmitBlock(WasMutated);
+ llvm::Value *V =
+ Builder.CreateBitCast(Collection,
+ ConvertType(getContext().getObjCIdType()),
+ "tmp");
+ CallArgList Args2;
+ Args2.push_back(std::make_pair(RValue::get(V),
+ getContext().getObjCIdType()));
+ // FIXME: We shouldn't need to get the function info here; the runtime
+ // should already have computed it to build the function.
+ EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo()),
+ EnumerationMutationFn, ReturnValueSlot(), Args2);
+
+ EmitBlock(WasNotMutated);
+
+ llvm::Value *StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+
+ llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
+
+ llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
+ "stateitems");
+
+ llvm::Value *CurrentItemPtr =
+ Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
+
+ llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
+
+ // Cast the item to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem,
+ ConvertType(ElementTy), "tmp");
+
+ if (!DeclAddress) {
+ LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+ // Store the current item into the element lvalue.
+ Builder.CreateStore(CurrentItem, LV.getAddress());
+ } else
+ Builder.CreateStore(CurrentItem, DeclAddress);
+
+ // Increment the counter.
+ Counter = Builder.CreateAdd(Counter,
+ llvm::ConstantInt::get(UnsignedLongLTy, 1));
+ Builder.CreateStore(Counter, CounterPtr);
+
+ llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
+ llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
+
+ BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
+
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(AfterBody);
+
+ llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
+
+ Counter = Builder.CreateLoad(CounterPtr);
+ Limit = Builder.CreateLoad(LimitPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, Limit, "isless");
+ Builder.CreateCondBr(IsLess, LoopBody, FetchMore);
+
+ // Fetch more elements.
+ EmitBlock(FetchMore);
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+ Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+ Limit = Builder.CreateLoad(LimitPtr);
+
+ IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+ Builder.CreateCondBr(IsZero, NoElements, LoopStart);
+
+ // No more elements.
+ EmitBlock(NoElements);
+
+ if (!DeclAddress) {
+ // If the element was not a declaration, set it to be null.
+
+ LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+ // Set the value to null.
+ Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
+ LV.getAddress());
+ }
+
+ EmitBlock(LoopEnd);
+}
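+
+// Minimal source form that exercises the emission above (hypothetical):
+//   for (id element in collection) { /* body */ }
+// 'element' may be a fresh declaration (DeclAddress) or any assignable
+// expression, in which case it is set to nil once enumeration finishes.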
+
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
+ CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
+ CGM.getObjCRuntime().EmitThrowStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtSynchronizedStmt(
+ const ObjCAtSynchronizedStmt &S) {
+ CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+}
+
+CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
new file mode 100644
index 0000000..6c25afe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -0,0 +1,2273 @@
+//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the GNU runtime. The
+// class in this file generates structures used by the GNU Objective-C runtime
+// library. These structures are defined in objc/objc.h and objc/objc-api.h in
+// the GNU runtime distribution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+
+#include <map>
+
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::dyn_cast;
+
+// The version of the runtime that this class targets. Must match the version
+// in the runtime.
+static const int RuntimeVersion = 8;
+static const int NonFragileRuntimeVersion = 9;
+static const int ProtocolVersion = 2;
+static const int NonFragileProtocolVersion = 3;
+
+namespace {
+class CGObjCGNU : public CodeGen::CGObjCRuntime {
+private:
+ CodeGen::CodeGenModule &CGM;
+ llvm::Module &TheModule;
+ const llvm::PointerType *SelectorTy;
+ const llvm::IntegerType *Int8Ty;
+ const llvm::PointerType *PtrToInt8Ty;
+ const llvm::FunctionType *IMPTy;
+ const llvm::PointerType *IdTy;
+ const llvm::PointerType *PtrToIdTy;
+ CanQualType ASTIdTy;
+ const llvm::IntegerType *IntTy;
+ const llvm::PointerType *PtrTy;
+ const llvm::IntegerType *LongTy;
+ const llvm::PointerType *PtrToIntTy;
+ llvm::GlobalAlias *ClassPtrAlias;
+ llvm::GlobalAlias *MetaClassPtrAlias;
+ std::vector<llvm::Constant*> Classes;
+ std::vector<llvm::Constant*> Categories;
+ std::vector<llvm::Constant*> ConstantStrings;
+ llvm::StringMap<llvm::Constant*> ObjCStrings;
+ llvm::Function *LoadFunction;
+ llvm::StringMap<llvm::Constant*> ExistingProtocols;
+ typedef std::pair<std::string, std::string> TypedSelector;
+ std::map<TypedSelector, llvm::GlobalAlias*> TypedSelectors;
+ llvm::StringMap<llvm::GlobalAlias*> UntypedSelectors;
+ // Selectors that we don't emit in GC mode
+ Selector RetainSel, ReleaseSel, AutoreleaseSel;
+ // Functions used for GC.
+ llvm::Constant *IvarAssignFn, *StrongCastAssignFn, *MemMoveFn, *WeakReadFn,
+ *WeakAssignFn, *GlobalAssignFn;
+ // Some zeros used for GEPs in lots of places.
+ llvm::Constant *Zeros[2];
+ llvm::Constant *NULLPtr;
+ llvm::LLVMContext &VMContext;
+ /// Metadata kind used to tie method lookups to message sends.
+ unsigned msgSendMDKind;
+private:
+ llvm::Constant *GenerateIvarList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets);
+ llvm::Constant *GenerateMethodList(const std::string &ClassName,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ bool isClassMethodList);
+ llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
+ llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
+ llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
+ llvm::Constant *GenerateProtocolList(
+ const llvm::SmallVectorImpl<std::string> &Protocols);
+ // To ensure that all protocols are seen by the runtime, we add a category on
+ // a class defined in the runtime, declaring no methods, but adopting the
+ // protocols.
+ void GenerateProtocolHolderCategory(void);
+ llvm::Constant *GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ bool isMeta=false);
+ llvm::Constant *GenerateProtocolMethodList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes);
+ llvm::Constant *MakeConstantString(const std::string &Str,
+ const std::string &Name = "");
+ llvm::Constant *ExportUniqueString(const std::string &Str,
+ const std::string &prefix);
+ llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V, llvm::StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
+ llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V, llvm::StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
+ llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+ void EmitClassRef(const std::string &className);
+ llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, const llvm::Type *Ty){
+ if (V->getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
+public:
+ CGObjCGNU(CodeGen::CodeGenModule &cgm);
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD);
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+ virtual llvm::Function *ModuleInitFunction();
+ virtual llvm::Function *GetPropertyGetFunction();
+ virtual llvm::Function *GetPropertySetFunction();
+ virtual llvm::Function *GetCopyStructFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ QualType Ty);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+} // end anonymous namespace
+
+/// Emits a reference to a dummy variable which is emitted with each class.
+/// This ensures that a linker error will be generated when trying to link
+/// together modules where a referenced class is not defined.
+void CGObjCGNU::EmitClassRef(const std::string &className) {
+ std::string symbolRef = "__objc_class_ref_" + className;
+ // Don't emit two copies of the same symbol
+ if (TheModule.getGlobalVariable(symbolRef))
+ return;
+ std::string symbolName = "__objc_class_name_" + className;
+ llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(symbolName);
+ if (!ClassSymbol) {
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, 0, symbolName);
+ }
+ new llvm::GlobalVariable(TheModule, ClassSymbol->getType(), true,
+ llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
+}
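+
+// For a class named Foo the module therefore contains, at most once:
+//   __objc_class_name_Foo  (external, defined wherever Foo is implemented)
+//   __objc_class_ref_Foo   (weak global holding its address)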
+
+static std::string SymbolNameForMethod(const std::string &ClassName,
+ const std::string &CategoryName, const std::string &MethodName,
+ bool isClassMethod) {
+ std::string MethodNameColonStripped = MethodName;
+ std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
+ ':', '_');
+ return std::string(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ CategoryName + "_" + MethodNameColonStripped;
+}
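+// Example: the instance method -[Foo(Bar) setObject:forKey:] maps to
+// "_i_Foo_Bar_setObject_forKey_"; class methods use the "_c_" prefix.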
+static std::string MangleSelectorTypes(const std::string &TypeString) {
+ std::string Mangled = TypeString;
+ // Simple mangling to avoid breaking when we mix JIT / static code.
+ // Not part of the ABI, subject to change without notice.
+ std::replace(Mangled.begin(), Mangled.end(), '@', '_');
+ std::replace(Mangled.begin(), Mangled.end(), ':', 'J');
+ std::replace(Mangled.begin(), Mangled.end(), '*', 'e');
+ std::replace(Mangled.begin(), Mangled.end(), '#', 'E');
+ std::replace(Mangled.begin(), Mangled.end(), '(', 'g');
+ std::replace(Mangled.begin(), Mangled.end(), ')', 'G');
+ std::replace(Mangled.begin(), Mangled.end(), '[', 'h');
+ std::replace(Mangled.begin(), Mangled.end(), ']', 'H');
+ return Mangled;
+}
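+// Example: the method type string "v12@0:4@8" (void return; id receiver,
+// SEL, and one id argument, each with its offset) mangles to "v12_0J4_8".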
+
+CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
+ : CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
+ MetaClassPtrAlias(0), VMContext(cgm.getLLVMContext()) {
+
+ msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
+
+ IntTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ LongTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy));
+
+ Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ // C string type. Used in lots of places.
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+
+ Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
+ Zeros[1] = Zeros[0];
+ NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ // Get the selector Type.
+ QualType selTy = CGM.getContext().getObjCSelType();
+ if (QualType() == selTy) {
+ SelectorTy = PtrToInt8Ty;
+ } else {
+ SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
+ }
+
+ PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
+ PtrTy = PtrToInt8Ty;
+
+ // Object type
+ ASTIdTy = CGM.getContext().getCanonicalType(CGM.getContext().getObjCIdType());
+ if (QualType() == ASTIdTy) {
+ IdTy = PtrToInt8Ty;
+ } else {
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ }
+ PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
+
+ // IMP type
+ std::vector<const llvm::Type*> IMPArgs;
+ IMPArgs.push_back(IdTy);
+ IMPArgs.push_back(SelectorTy);
+ IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);
+
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ // Get selectors needed in GC mode
+ RetainSel = GetNullarySelector("retain", CGM.getContext());
+ ReleaseSel = GetNullarySelector("release", CGM.getContext());
+ AutoreleaseSel = GetNullarySelector("autorelease", CGM.getContext());
+
+ // Get functions needed in GC mode
+
+ // id objc_assign_ivar(id, id, ptrdiff_t);
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ Args.push_back(PtrToIdTy);
+ // FIXME: ptrdiff_t
+ Args.push_back(LongTy);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(IdTy, Args, false);
+ IvarAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ // id objc_assign_strongCast (id, id*)
+ Args.pop_back();
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ StrongCastAssignFn =
+ CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ // id objc_assign_global(id, id*);
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ GlobalAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ // id objc_assign_weak(id, id*);
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ WeakAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ // id objc_read_weak(id*);
+ Args.clear();
+ Args.push_back(PtrToIdTy);
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ WeakReadFn = CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ // void *objc_memmove_collectable(void*, void *, size_t);
+ Args.clear();
+ Args.push_back(PtrToInt8Ty);
+ Args.push_back(PtrToInt8Ty);
+ // FIXME: size_t
+ Args.push_back(LongTy);
+ FTy = llvm::FunctionType::get(IdTy, Args, false);
+ MemMoveFn = CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ }
+}
+
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) {
+ llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString());
+ // With the incompatible ABI, this will need to be replaced with a direct
+ // reference to the class symbol. For the compatible nonfragile ABI we are
+ // still performing this lookup at run time but emitting the symbol for the
+ // class externally so that we can make the switch later.
+ EmitClassRef(OID->getNameAsString());
+ ClassName = Builder.CreateStructGEP(ClassName, 0);
+
+ std::vector<const llvm::Type*> Params(1, PtrToInt8Ty);
+ llvm::Constant *ClassLookupFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy,
+ Params,
+ true),
+ "objc_lookup_class");
+ return Builder.CreateCall(ClassLookupFn, ClassName);
+}
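+
+// Roughly equivalent to the C call (GNU runtime API, assumed declaration):
+//   Class cls = objc_lookup_class("Foo");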
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+ llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
+ if (US == 0)
+ US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_untyped_selector_alias"+Sel.getAsString(),
+ NULL, &TheModule);
+
+ return Builder.CreateLoad(US);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method) {
+ std::string SelName = Method->getSelector().getAsString();
+ std::string SelTypes;
+ CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+ // Typed selectors
+ TypedSelector Selector(SelName, SelTypes);
+
+ // If it's already cached, return it.
+ if (TypedSelectors[Selector]) {
+ return Builder.CreateLoad(TypedSelectors[Selector]);
+ }
+
+ // If it isn't, cache it.
+ llvm::GlobalAlias *Sel = new llvm::GlobalAlias(
+ llvm::PointerType::getUnqual(SelectorTy),
+ llvm::GlobalValue::PrivateLinkage, ".objc_selector_alias" + SelName,
+ NULL, &TheModule);
+ TypedSelectors[Selector] = Sel;
+
+ return Builder.CreateLoad(Sel);
+}
+
+llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
+ const std::string &Name) {
+ llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+llvm::Constant *CGObjCGNU::ExportUniqueString(const std::string &Str,
+ const std::string &prefix) {
+ std::string name = prefix + Str;
+ llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
+ if (!ConstStr) {
+ llvm::Constant *value = llvm::ConstantArray::get(VMContext, Str, true);
+ ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
+ llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ }
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V, llvm::StringRef Name,
+ llvm::GlobalValue::LinkageTypes linkage) {
+ llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false, linkage, C, Name);
+}
+
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V, llvm::StringRef Name,
+ llvm::GlobalValue::LinkageTypes linkage) {
+ llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false, linkage, C, Name);
+}
+
+/// Generate an NSConstantString object.
+llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+
+ std::string Str(SL->getStrData(), SL->getByteLength());
+
+ // Look for an existing one
+ llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
+ if (old != ObjCStrings.end())
+ return old->getValue();
+
+ std::vector<llvm::Constant*> Ivars;
+ Ivars.push_back(NULLPtr);
+ Ivars.push_back(MakeConstantString(Str));
+ Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
+ llvm::Constant *ObjCStr = MakeGlobal(
+ llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ Ivars, ".objc_str");
+ ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
+ ObjCStrings[Str] = ObjCStr;
+ ConstantStrings.push_back(ObjCStr);
+ return ObjCStr;
+}
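+
+// Equivalent C sketch of the emitted object; the first slot is left null
+// here and fixed up when the runtime registers the module's static instances:
+//   struct { void *isa; const char *c_string; int length; };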
+
+/// Generates a message send where 'super' is the receiver. This is a message
+/// send to self with special delivery semantics indicating which class's
+/// method should be called.
+CodeGen::RValue
+CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(Receiver);
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(0);
+ }
+ }
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *cmd = GetSelector(Builder, Sel);
+
+ CallArgList ActualArgs;
+
+ ActualArgs.push_back(
+ std::make_pair(RValue::get(Builder.CreateBitCast(Receiver, IdTy)),
+ ASTIdTy));
+ ActualArgs.push_back(std::make_pair(RValue::get(cmd),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *impType =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+
+ llvm::Value *ReceiverClass = 0;
+ if (isCategoryImpl) {
+ llvm::Constant *classLookupFunction = 0;
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ if (IsClassMessage) {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, Params, true), "objc_get_meta_class");
+ } else {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, Params, true), "objc_get_class");
+ }
+ ReceiverClass = Builder.CreateCall(classLookupFunction,
+ MakeConstantString(Class->getNameAsString()));
+ } else {
+ // Set up global aliases for the metaclass or class pointer if they do not
+ // already exist. These will are forward-references which will be set to
+ // pointers to the class and metaclass structure created for the runtime
+ // load function. To send a message to super, we look up the value of the
+ // super_class pointer from either the class or metaclass structure.
+ if (IsClassMessage) {
+ if (!MetaClassPtrAlias) {
+ MetaClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = MetaClassPtrAlias;
+ } else {
+ if (!ClassPtrAlias) {
+ ClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_class_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = ClassPtrAlias;
+ }
+ }
+ // Cast the pointer to a simplified version of the class structure
+ ReceiverClass = Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(
+ llvm::StructType::get(VMContext, IdTy, IdTy, NULL)));
+ // Get the superclass pointer
+ ReceiverClass = Builder.CreateStructGEP(ReceiverClass, 1);
+ // Load the superclass pointer
+ ReceiverClass = Builder.CreateLoad(ReceiverClass);
+ // Construct the structure used to look up the IMP
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(VMContext,
+ Receiver->getType(), IdTy, NULL);
+ llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
+
+ Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
+ Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
+
+ // Get the IMP
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
+ Params.push_back(SelectorTy);
+
+ llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+ llvm::Value *imp;
+
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ // The lookup function returns a slot, which can be safely cached.
+ llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
+ IntTy, llvm::PointerType::getUnqual(impType), NULL);
+
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(SlotTy), Params, true),
+ "objc_slot_lookup_super");
+
+ llvm::CallInst *slot = Builder.CreateCall(lookupFunction, lookupArgs,
+ lookupArgs+2);
+ slot->setOnlyReadsMemory();
+
+ imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+ } else {
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup_super");
+ imp = Builder.CreateCall(lookupFunction, lookupArgs, lookupArgs+2);
+ }
+
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsClassMessage)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
+ 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+ return msgRet;
+}
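+
+// The structure and lookup above mirror the GNU runtime's super dispatch,
+// roughly (assumed declarations):
+//   struct objc_super { id receiver; Class super_class; };
+//   IMP objc_msg_lookup_super(struct objc_super *super, SEL op);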
+
+/// Generate code for a message send expression.
+CodeGen::RValue
+CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ // Strip out message sends to retain / release in GC mode
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(Receiver);
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(0);
+ }
+ }
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // If the return type is something that goes in an integer register, the
+ // runtime will handle 0 returns. For other cases, we fill in the 0 value
+ // ourselves.
+ //
+ // The language spec says the result of this kind of message send is
+ // undefined, but lots of people seem to have forgotten to read that
+ // paragraph and insist on sending messages to nil that have structure
+ // returns. With GCC, this generates a random return value (whatever happens
+ // to be on the stack / in those registers at the time) on most platforms,
+ // and generates a SegV on SPARC. With LLVM it corrupts the stack.
+ bool isPointerSizedReturn = false;
+ if (ResultType->isAnyPointerType() || ResultType->isIntegralType() ||
+ ResultType->isVoidType())
+ isPointerSizedReturn = true;
+
+ llvm::BasicBlock *startBB = 0;
+ llvm::BasicBlock *messageBB = 0;
+ llvm::BasicBlock *continueBB = 0;
+
+ if (!isPointerSizedReturn) {
+ startBB = Builder.GetInsertBlock();
+ messageBB = CGF.createBasicBlock("msgSend");
+ continueBB = CGF.createBasicBlock("continue");
+
+ llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
+ llvm::Constant::getNullValue(Receiver->getType()));
+ Builder.CreateCondBr(isNil, continueBB, messageBB);
+ CGF.EmitBlock(messageBB);
+ }
+
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ llvm::Value *cmd;
+ if (Method)
+ cmd = GetSelector(Builder, Method);
+ else
+ cmd = GetSelector(Builder, Sel);
+ CallArgList ActualArgs;
+
+ Receiver = Builder.CreateBitCast(Receiver, IdTy);
+ ActualArgs.push_back(
+ std::make_pair(RValue::get(Receiver), ASTIdTy));
+ ActualArgs.push_back(std::make_pair(RValue::get(cmd),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *impType =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class ? Class->getNameAsString() : ""),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), Class != 0)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
+
+ llvm::Value *imp;
+ // For sender-aware dispatch, we pass the sender as the third argument to a
+ // lookup function. When sending messages from C code, the sender is nil.
+ // objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+
+ std::vector<const llvm::Type*> Params;
+ llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
+ Builder.CreateStore(Receiver, ReceiverPtr);
+ Params.push_back(ReceiverPtr->getType());
+ Params.push_back(SelectorTy);
+ llvm::Value *self;
+
+ if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) {
+ self = CGF.LoadObjCSelf();
+ } else {
+ self = llvm::ConstantPointerNull::get(IdTy);
+ }
+
+ Params.push_back(self->getType());
+
+ // The lookup function returns a slot, which can be safely cached.
+ llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
+ IntTy, llvm::PointerType::getUnqual(impType), NULL);
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(SlotTy), Params, true),
+ "objc_msg_lookup_sender");
+
+ // The lookup function is guaranteed not to capture the receiver pointer.
+ if (llvm::Function *LookupFn = dyn_cast<llvm::Function>(lookupFunction)) {
+ LookupFn->setDoesNotCapture(1);
+ }
+
+ llvm::CallInst *slot =
+ Builder.CreateCall3(lookupFunction, ReceiverPtr, cmd, self);
+ slot->setOnlyReadsMemory();
+ slot->setMetadata(msgSendMDKind, node);
+
+ imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+
+ // The lookup function may have changed the receiver, so make sure we use
+ // the new one.
+ ActualArgs[0] =
+ std::make_pair(RValue::get(Builder.CreateLoad(ReceiverPtr)), ASTIdTy);
+ } else {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Receiver->getType());
+ Params.push_back(SelectorTy);
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup");
+
+ imp = Builder.CreateCall2(lookupFunction, Receiver, cmd);
+ cast<llvm::CallInst>(imp)->setMetadata(msgSendMDKind, node);
+ }
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
+ 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+
+ if (!isPointerSizedReturn) {
+ messageBB = CGF.Builder.GetInsertBlock();
+ CGF.Builder.CreateBr(continueBB);
+ CGF.EmitBlock(continueBB);
+ if (msgRet.isScalar()) {
+ llvm::Value *v = msgRet.getScalarVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType());
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
+ msgRet = RValue::get(phi);
+ } else if (msgRet.isAggregate()) {
+ llvm::Value *v = msgRet.getAggregateAddr();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType());
+ const llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
+ llvm::AllocaInst *NullVal =
+ CGF.CreateTempAlloca(RetTy->getElementType(), "null");
+ CGF.InitTempAlloca(NullVal,
+ llvm::Constant::getNullValue(RetTy->getElementType()));
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(NullVal, startBB);
+ msgRet = RValue::getAggregate(phi);
+ } else /* isComplex() */ {
+ std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v.first->getType());
+ phi->addIncoming(v.first, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()),
+ startBB);
+ llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType());
+ phi2->addIncoming(v.second, messageBB);
+ phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()),
+ startBB);
+ msgRet = RValue::getComplex(phi, phi2);
+ }
+ }
+ return msgRet;
+}
+
+/// Generates a MethodList. Used in construction of the objc_class and
+/// objc_category structures.
+llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ bool isClassMethodList) {
+ if (MethodSels.empty())
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+ PtrToInt8Ty, // Method types
+ llvm::PointerType::getUnqual(IMPTy), //Method pointer
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ Elements.clear();
+ if (llvm::Constant *Method =
+ TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
+ MethodSels[i].getAsString(),
+ isClassMethodList))) {
+ llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
+ Elements.push_back(C);
+ Elements.push_back(MethodTypes[i]);
+ Method = llvm::ConstantExpr::getBitCast(Method,
+ llvm::PointerType::getUnqual(IMPTy));
+ Elements.push_back(Method);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
+ }
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
+ Methods.size());
+ llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+
+ // Structure containing list pointer, array and array count
+ llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
+ llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(VMContext);
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::get(VMContext,
+ NextPtrTy,
+ IntTy,
+ ObjCMethodArrayTy,
+ NULL);
+ // Refine next pointer type to concrete type
+ llvm::cast<llvm::OpaqueType>(
+ OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy);
+ ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get());
+
+ Methods.clear();
+ Methods.push_back(llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(ObjCMethodListTy)));
+ Methods.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ MethodTypes.size()));
+ Methods.push_back(MethodArray);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+}
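+
+// Equivalent C sketch of the list emitted above:
+//   struct objc_method { const char *name; const char *types; IMP imp; };
+//   struct objc_method_list { struct objc_method_list *next; int count;
+//                             struct objc_method methods[]; };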
+
+/// Generates an IvarList. Used in construction of an objc_class.
+llvm::Constant *CGObjCGNU::GenerateIvarList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
+ if (IvarNames.size() == 0)
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ IntTy,
+ NULL);
+ std::vector<llvm::Constant*> Ivars;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = IvarNames.size(); i < e; ++i) {
+ Elements.clear();
+ Elements.push_back(IvarNames[i]);
+ Elements.push_back(IvarTypes[i]);
+ Elements.push_back(IvarOffsets[i]);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
+ IvarNames.size());
+
+ Elements.clear();
+ Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
+ Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
+ // Structure containing array and array count
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(VMContext, IntTy,
+ ObjCIvarArrayTy,
+ NULL);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+}
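+
+// Equivalent C sketch:
+//   struct objc_ivar { const char *name; const char *type; int offset; };
+//   struct objc_ivar_list { int count; struct objc_ivar ivars[]; };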
+
+/// Generate a class structure
+llvm::Constant *CGObjCGNU::GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ bool isMeta) {
+ // Set up the class structure
+ // Note: Several of these are char*s when they should be ids. This is
+ // because the runtime performs this translation on load.
+ //
+ // Fields marked New ABI are part of the GNUstep runtime. We emit them
+ // anyway; the classes will still work with the GNU runtime, which simply
+ // ignores the extra fields.
+ llvm::StructType *ClassTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, // class_pointer
+ PtrToInt8Ty, // super_class
+ PtrToInt8Ty, // name
+ LongTy, // version
+ LongTy, // info
+ LongTy, // instance_size
+ IVars->getType(), // ivars
+ Methods->getType(), // methods
+ // These are all filled in by the runtime, so we pretend
+ PtrTy, // dtable
+ PtrTy, // subclass_list
+ PtrTy, // sibling_class
+ PtrTy, // protocols
+ PtrTy, // gc_object_type
+ // New ABI:
+ LongTy, // abi_version
+ IvarOffsets->getType(), // ivar_offsets
+ Properties->getType(), // properties
+ NULL);
+ llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+ // Fill in the structure
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
+ Elements.push_back(SuperClass);
+ Elements.push_back(MakeConstantString(Name, ".class_name"));
+ Elements.push_back(Zero);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+ Elements.push_back(InstanceSize);
+ Elements.push_back(IVars);
+ Elements.push_back(Methods);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
+ Elements.push_back(NULLPtr);
+ Elements.push_back(Zero);
+ Elements.push_back(IvarOffsets);
+ Elements.push_back(Properties);
+ // Create an instance of the structure
+ // This is now an externally visible symbol, so that we can speed up class
+ // messages in the next ABI.
+ return MakeGlobal(ClassTy, Elements, (isMeta ? "_OBJC_METACLASS_":
+ "_OBJC_CLASS_") + std::string(Name), llvm::GlobalValue::ExternalLinkage);
+}
+
+llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
+ PtrToInt8Ty,
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ Elements.clear();
+ Elements.push_back(MethodNames[i]);
+ Elements.push_back(MethodTypes[i]);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
+ }
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
+ MethodNames.size());
+ llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(VMContext,
+ IntTy, ObjCMethodArrayTy, NULL);
+ Methods.clear();
+ Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
+ Methods.push_back(Array);
+ return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
+}
+
+// Create the protocol list structure used in classes, categories and so on
+llvm::Constant *CGObjCGNU::GenerateProtocolList(
+ const llvm::SmallVectorImpl<std::string> &Protocols) {
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Protocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+ PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ LongTy, // FIXME: Should be size_t
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
+ iter != endIter; ++iter) {
+ llvm::Constant *protocol = 0;
+ llvm::StringMap<llvm::Constant*>::iterator value =
+ ExistingProtocols.find(*iter);
+ if (value == ExistingProtocols.end()) {
+ protocol = GenerateEmptyProtocol(*iter);
+ } else {
+ protocol = value->getValue();
+ }
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
+ PtrToInt8Ty);
+ Elements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ Elements);
+ Elements.clear();
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
+ Elements.push_back(ProtocolArray);
+ return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
+}
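+
+// Equivalent C sketch; entries are protocol objects stored as char* and
+// fixed up by the runtime:
+//   struct objc_protocol_list { struct objc_protocol_list *next;
+//                               long count; const char *list[]; };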
+
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+ const llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
+ const std::string &ProtocolName) {
+ llvm::SmallVector<std::string, 0> EmptyStringVector;
+ llvm::SmallVector<llvm::Constant*, 0> EmptyConstantVector;
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
+ llvm::Constant *MethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
+ NonFragileProtocolVersion : ProtocolVersion;
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
+}
+
+void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ ASTContext &Context = CGM.getContext();
+ std::string ProtocolName = PD->getNameAsString();
+ llvm::SmallVector<std::string, 16> Protocols;
+ for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
+ E = PD->protocol_end(); PI != E; ++PI)
+ Protocols.push_back((*PI)->getNameAsString());
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
+ for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(),
+ E = PD->instmeth_end(); iter != E; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
+ if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptionalInstanceMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ InstanceMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ }
+ // Collect information about class methods:
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
+ for (ObjCProtocolDecl::classmeth_iterator
+ iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
+ iter != endIter ; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptionalClassMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ ClassMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ }
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ llvm::Constant *OptionalInstanceMethodList =
+ GenerateProtocolMethodList(OptionalInstanceMethodNames,
+ OptionalInstanceMethodTypes);
+ llvm::Constant *OptionalClassMethodList =
+ GenerateProtocolMethodList(OptionalClassMethodNames,
+ OptionalClassMethodTypes);
+
+ // Property metadata: name, attributes, isSynthesized, getter name, getter
+ // types, setter name, setter types.
+ // The isSynthesized value is always set to 0 in a protocol. It exists to
+ // simplify the runtime library by allowing it to use the same data
+ // structures for protocol metadata everywhere.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+ std::vector<llvm::Constant*> OptionalProperties;
+
+ // Add all of the property methods that need adding to the method list and
+ // to the property metadata list.
+ for (ObjCContainerDecl::prop_iterator
+ iter = PD->prop_begin(), endIter = PD->prop_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
+ OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ } else {
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ }
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
+ PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
+ PropertyListInit, ".objc_property_list");
+
+ llvm::Constant *OptionalPropertyArray =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
+ OptionalProperties.size()) , OptionalProperties);
+ llvm::Constant* OptionalPropertyListInitFields[] = {
+ llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
+ OptionalPropertyArray };
+
+ llvm::Constant *OptionalPropertyListInit =
+ llvm::ConstantStruct::get(VMContext, OptionalPropertyListInitFields, 3, false);
+ llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
+ OptionalPropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
+ ".objc_property_list");
+
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ OptionalInstanceMethodList->getType(),
+ OptionalClassMethodList->getType(),
+ PropertyList->getType(),
+ OptionalPropertyList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
+ NonFragileProtocolVersion : ProtocolVersion;
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ Elements.push_back(OptionalInstanceMethodList);
+ Elements.push_back(OptionalClassMethodList);
+ Elements.push_back(PropertyList);
+ Elements.push_back(OptionalPropertyList);
+ ExistingProtocols[ProtocolName] =
+ llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
+ ".objc_protocol"), IdTy);
+}
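+
+// Putting the pieces together, the protocol emitted above is roughly the
+// following C structure (a sketch; field names are illustrative only):
+//
+//   struct objc_protocol {
+//     id isa;           // actually the protocol version, stored via inttoptr
+//     char *name;
+//     struct objc_protocol_list *protocols;
+//     struct objc_method_description_list *instance_methods;
+//     struct objc_method_description_list *class_methods;
+//     struct objc_method_description_list *optional_instance_methods;
+//     struct objc_method_description_list *optional_class_methods;
+//     struct objc_property_list *properties;
+//     struct objc_property_list *optional_properties;
+//   };
+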
+void CGObjCGNU::GenerateProtocolHolderCategory(void) {
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 1> MethodSels;
+ llvm::SmallVector<llvm::Constant*, 1> MethodTypes;
+
+ std::vector<llvm::Constant*> Elements;
+ const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
+ const std::string CategoryName = "AnotherHack";
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+ // Protocol list
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
+ ExistingProtocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+ PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ LongTy, // FIXME: Should be size_t
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> ProtocolElements;
+ for (llvm::StringMapIterator<llvm::Constant*> iter =
+ ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
+ PtrTy);
+ ProtocolElements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ ProtocolElements);
+ ProtocolElements.clear();
+ ProtocolElements.push_back(NULLPtr);
+ ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
+ ExistingProtocols.size()));
+ ProtocolElements.push_back(ProtocolArray);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
+ ProtocolElements, ".objc_protocol_list"), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ std::string ClassName = OCD->getClassInterface()->getNameAsString();
+ std::string CategoryName = OCD->getNameAsString();
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 16> InstanceMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ iter = OCD->instmeth_begin(), endIter = OCD->instmeth_end();
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect information about class methods
+ llvm::SmallVector<Selector, 16> ClassMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ iter = OCD->classmeth_begin(), endIter = OCD->classmeth_end();
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect the names of referenced protocols
+ llvm::SmallVector<std::string, 16> Protocols;
+ const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl();
+ const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
+ false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
+ PtrTy));
+ // Protocol list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(
+ GenerateProtocolList(Protocols), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
+ llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
+ llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+ ASTContext &Context = CGM.getContext();
+ //
+ // Property metadata: name, attributes, isSynthesized, getter name, getter
+ // types, setter name, setter types.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+
+
+ // Add all of the property methods that need adding to the method list and
+ // to the property metadata list.
+ for (ObjCImplDecl::propimpl_iterator
+ iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ ObjCPropertyImplDecl *propertyImpl = *iter;
+ bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, isSynthesized));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(getter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(setter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ llvm::ArrayType *PropertyArrayTy =
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
+ Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, PropertyListInit,
+ ".objc_property_list");
+}
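+
+// The property metadata emitted above is roughly (a sketch; the names are
+// illustrative, and the second list field is assumed to be a "next" pointer):
+//
+//   struct objc_property {
+//     const char *name;
+//     char attributes;
+//     char is_synthesized;
+//     const char *getter_name;
+//     const char *getter_types;
+//     const char *setter_name;
+//     const char *setter_types;
+//   };
+//   struct objc_property_list {
+//     int count;
+//     void *next; // always NULL here
+//     struct objc_property properties[];
+//   };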
+
+void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
+ ASTContext &Context = CGM.getContext();
+
+ // Get the superclass name.
+ const ObjCInterfaceDecl * SuperClassDecl =
+ OID->getClassInterface()->getSuperClass();
+ std::string SuperClassName;
+ if (SuperClassDecl) {
+ SuperClassName = SuperClassDecl->getNameAsString();
+ EmitClassRef(SuperClassName);
+ }
+
+ // Get the class name
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ // Emit the symbol that is used to generate linker errors if this class is
+ // referenced in other modules but not declared.
+ std::string classSymbolName = "__objc_class_name_" + ClassName;
+ if (llvm::GlobalVariable *symbol =
+ TheModule.getGlobalVariable(classSymbolName)) {
+ symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
+ } else {
+ new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
+ classSymbolName);
+ }
+
+ // Get the size of instances.
+ int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
+
+ // Collect information about instance variables.
+ llvm::SmallVector<llvm::Constant*, 16> IvarNames;
+ llvm::SmallVector<llvm::Constant*, 16> IvarTypes;
+ llvm::SmallVector<llvm::Constant*, 16> IvarOffsets;
+
+ std::vector<llvm::Constant*> IvarOffsetValues;
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
+ // For non-fragile ivars, set the instance size to 0 - {the size of just this
+ // class}. The runtime will then set this to the correct value on load.
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ instanceSize = 0 - (instanceSize - superInstanceSize);
+ }
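+ // Worked example (numbers assumed purely for illustration): if the
+ // superclass occupies 8 bytes and this class adds one 4-byte ivar (12
+ // total), the stored instanceSize becomes 0 - (12 - 8) = -4; the runtime
+ // replaces this negative delta with the real size when the class loads.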
+
+ // Collect declared and synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ CGM.getContext().ShallowCollectObjCIvars(ClassDecl, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ // Store the name
+ IvarNames.push_back(MakeConstantString(IVD->getNameAsString()));
+ // Get the type encoding for this ivar
+ std::string TypeStr;
+ Context.getObjCEncodingForType(IVD->getType(), TypeStr);
+ IvarTypes.push_back(MakeConstantString(TypeStr));
+ // Get the offset
+ uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
+ uint64_t Offset = BaseOffset;
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ Offset = BaseOffset - superInstanceSize;
+ }
+ IvarOffsets.push_back(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset));
+ IvarOffsetValues.push_back(new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ llvm::ConstantInt::get(IntTy, BaseOffset),
+ "__objc_ivar_offset_value_" + ClassName +"." +
+ IVD->getNameAsString()));
+ }
+ llvm::Constant *IvarOffsetArrayInit =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PtrToIntTy,
+ IvarOffsetValues.size()), IvarOffsetValues);
+ llvm::GlobalVariable *IvarOffsetArray = new llvm::GlobalVariable(TheModule,
+ IvarOffsetArrayInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, IvarOffsetArrayInit,
+ ".ivar.offsets");
+
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 16> InstanceMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCImplementationDecl::instmeth_iterator
+ iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels,
+ InstanceMethodTypes);
+
+
+ // Collect information about class methods
+ llvm::SmallVector<Selector, 16> ClassMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCImplementationDecl::classmeth_iterator
+ iter = OID->classmeth_begin(), endIter = OID->classmeth_end();
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ // Collect the names of referenced protocols
+ llvm::SmallVector<std::string, 16> Protocols;
+ const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+
+
+ // Get the superclass pointer.
+ llvm::Constant *SuperClass;
+ if (!SuperClassName.empty()) {
+ SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
+ } else {
+ SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ }
+ // Empty vector used to construct empty method lists
+ llvm::SmallVector<llvm::Constant*, 1> empty;
+ // Generate the method and instance variable lists
+ llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
+ InstanceMethodSels, InstanceMethodTypes, false);
+ llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
+ ClassMethodSels, ClassMethodTypes, true);
+ llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
+ IvarOffsets);
+ // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
+ // we emit a symbol containing the offset for each ivar in the class. This
+ // allows code compiled for the non-Fragile ABI to inherit from code compiled
+ // for the legacy ABI, without causing problems. The converse is also
+ // possible, but causes all ivar accesses to be fragile.
+ int i = 0;
+ // Offset pointer for getting at the correct field in the ivar list when
+ // setting up the offset variable. The indices are: the base address of the
+ // global, the ivar array (second field), the ivar in this list (set for
+ // each ivar), and the offset (third field in the ivar structure).
+ const llvm::Type *IndexTy = llvm::Type::getInt32Ty(VMContext);
+ llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
+ llvm::ConstantInt::get(IndexTy, 1), 0,
+ llvm::ConstantInt::get(IndexTy, 2) };
+
+ for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
+ endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+ const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
+ +(*iter)->getNameAsString();
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i++);
+ // Get the correct ivar field
+ llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
+ IvarList, offsetPointerIndexes, 4);
+ // Get the existing offset variable, if one exists.
+ llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
+ if (offset) {
+ offset->setInitializer(offsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ // Add a new offset variable if there isn't one already.
+ offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ }
+ }
+ // Generate the metaclass for class methods
+ llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
+ NULLPtr, 0x12L, ClassName.c_str(), 0, Zeros[0], GenerateIvarList(
+ empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr, NULLPtr, true);
+
+ // Generate the class structure
+ llvm::Constant *ClassStruct =
+ GenerateClassStructure(MetaClassStruct, SuperClass, 0x11L,
+ ClassName.c_str(), 0,
+ llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
+ MethodList, GenerateProtocolList(Protocols), IvarOffsetArray,
+ Properties);
+
+ // Resolve the class aliases, if they exist.
+ if (ClassPtrAlias) {
+ ClassPtrAlias->setAliasee(
+ llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias = 0;
+ }
+ if (MetaClassPtrAlias) {
+ MetaClassPtrAlias->setAliasee(
+ llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias = 0;
+ }
+
+ // Add class structure to list to be added to the symtab later
+ ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
+ Classes.push_back(ClassStruct);
+}
+
+
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
+ // Only emit an ObjC load function if some Objective-C metadata was
+ // actually generated
+ if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
+ ExistingProtocols.empty() && TypedSelectors.empty() &&
+ UntypedSelectors.empty())
+ return NULL;
+
+ // Add all referenced protocols to a category.
+ GenerateProtocolHolderCategory();
+
+ const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ SelectorTy->getElementType());
+ const llvm::Type *SelStructPtrTy = SelectorTy;
+ bool isSelOpaque = false;
+ if (SelStructTy == 0) {
+ SelStructTy = llvm::StructType::get(VMContext, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ isSelOpaque = true;
+ }
+
+ // Name the ObjC types to make the IR a bit easier to read
+ TheModule.addTypeName(".objc_selector", SelStructPtrTy);
+ TheModule.addTypeName(".objc_id", IdTy);
+ TheModule.addTypeName(".objc_imp", IMPTy);
+
+ std::vector<llvm::Constant*> Elements;
+ llvm::Constant *Statics = NULLPtr;
+ // Generate statics list:
+ if (ConstantStrings.size()) {
+ llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ ConstantStrings.size() + 1);
+ ConstantStrings.push_back(NULLPtr);
+
+ llvm::StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass;
+ if (StringClass.empty()) StringClass = "NXConstantString";
+ Elements.push_back(MakeConstantString(StringClass,
+ ".objc_static_class_name"));
+ Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
+ ConstantStrings));
+ llvm::StructType *StaticsListTy =
+ llvm::StructType::get(VMContext, PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::Type *StaticsListPtrTy =
+ llvm::PointerType::getUnqual(StaticsListTy);
+ Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+ llvm::ArrayType *StaticsListArrayTy =
+ llvm::ArrayType::get(StaticsListPtrTy, 2);
+ Elements.clear();
+ Elements.push_back(Statics);
+ Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
+ Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+ Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
+ }
+ // Array of classes, categories, and constant objects
+ llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Classes.size() + Categories.size() + 2);
+ llvm::StructType *SymTabTy = llvm::StructType::get(VMContext,
+ LongTy, SelStructPtrTy,
+ llvm::Type::getInt16Ty(VMContext),
+ llvm::Type::getInt16Ty(VMContext),
+ ClassListTy, NULL);
+
+ Elements.clear();
+ // Pointer to an array of selectors used in this module.
+ std::vector<llvm::Constant*> Selectors;
+ for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+ iter = TypedSelectors.begin(), iterEnd = TypedSelectors.end();
+ iter != iterEnd ; ++iter) {
+ Elements.push_back(ExportUniqueString(iter->first.first, ".objc_sel_name"));
+ Elements.push_back(MakeConstantString(iter->first.second,
+ ".objc_sel_types"));
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ }
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+ iter = UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+ iter != iterEnd; ++iter) {
+ Elements.push_back(
+ ExportUniqueString(iter->getKeyData(), ".objc_sel_name"));
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ }
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ // Number of static selectors
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Selectors.size() ));
+ llvm::Constant *SelectorList = MakeGlobal(
+ llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
+ ".objc_selector_list");
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ SelStructPtrTy));
+
+ // Now that all of the static selectors exist, create pointers to them.
+ int index = 0;
+ for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+ iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end();
+ iter != iterEnd; ++iter) {
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
+ true, llvm::GlobalValue::LinkOnceODRLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ MangleSelectorTypes(".objc_sel_ptr"+iter->first.first+"."+
+ iter->first.second));
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ if (isSelOpaque) {
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+ llvm::PointerType::getUnqual(SelectorTy));
+ }
+ (*iter).second->replaceAllUsesWith(SelPtr);
+ (*iter).second->eraseFromParent();
+ }
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+ iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+ iter != iterEnd; iter++) {
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
+ true, llvm::GlobalValue::LinkOnceODRLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ MangleSelectorTypes(std::string(".objc_sel_ptr")+iter->getKey().str()));
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ if (isSelOpaque) {
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+ llvm::PointerType::getUnqual(SelectorTy));
+ }
+ (*iter).second->replaceAllUsesWith(SelPtr);
+ (*iter).second->eraseFromParent();
+ }
+ // Number of classes defined.
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Classes.size()));
+ // Number of categories defined
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Categories.size()));
+ // Create an array of classes, then categories, then static object instances
+ Classes.insert(Classes.end(), Categories.begin(), Categories.end());
+ // NULL-terminated list of static object instances (mainly constant strings)
+ Classes.push_back(Statics);
+ Classes.push_back(NULLPtr);
+ llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
+ Elements.push_back(ClassList);
+ // Construct the symbol table
+ llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
+
+ // The symbol table is contained in a module which has some version-checking
+ // constants
+ llvm::StructType * ModuleTy = llvm::StructType::get(VMContext, LongTy, LongTy,
+ PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
+ Elements.clear();
+ // Runtime version used for compatibility checking.
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ NonFragileRuntimeVersion));
+ } else {
+ Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
+ }
+ // sizeof(ModuleTy)
+ llvm::TargetData td(&TheModule);
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ModuleTy)/8));
+ // FIXME: Should be the path to the file where this module was declared
+ Elements.push_back(NULLPtr);
+ Elements.push_back(SymTab);
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+
+ // Create the load function calling the runtime entry point with the module
+ // structure
+ llvm::Function * LoadFunction = llvm::Function::Create(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
+ llvm::GlobalValue::InternalLinkage, ".objc_load_function",
+ &TheModule);
+ llvm::BasicBlock *EntryBB =
+ llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
+ CGBuilderTy Builder(VMContext);
+ Builder.SetInsertPoint(EntryBB);
+
+ std::vector<const llvm::Type*> Params(1,
+ llvm::PointerType::getUnqual(ModuleTy));
+ llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getVoidTy(VMContext), Params, true), "__objc_exec_class");
+ Builder.CreateCall(Register, Module);
+ Builder.CreateRetVoid();
+
+ return LoadFunction;
+}
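+
+// In rough pseudo-C, the function emitted above is (a sketch of the IR, not
+// code that exists anywhere):
+//
+//   static void ".objc_load_function"(void) {
+//     __objc_exec_class(&module_structure);
+//   }
+//
+// where module_structure carries the runtime version, sizeof(module), a
+// (currently NULL) source path, and the symbol table built above.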
+
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ std::string CategoryName = OCD ? OCD->getNameAsString() : "";
+ std::string ClassName = CD->getName();
+ std::string MethodName = OMD->getSelector().getAsString();
+ bool isClassMethod = !OMD->isInstanceMethod();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
+ MethodName, isClassMethod);
+
+ llvm::Function *Method
+ = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
+ return Method;
+}
+
+llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ Params.push_back(IntTy);
+ Params.push_back(BoolTy);
+ // id objc_getProperty (id, SEL, int, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(IdTy, Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_getProperty"));
+}
+
+llvm::Function *CGObjCGNU::GetPropertySetFunction() {
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ Params.push_back(IntTy);
+ Params.push_back(IdTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // void objc_setProperty (id, SEL, int, id, bool, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_setProperty"));
+}
+
+// FIXME. Implement this.
+llvm::Function *CGObjCGNU::GetCopyStructFunction() {
+ return 0;
+}
+
+llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_enumerationMutation (id)
+ llvm::SmallVector<CanQualType,1> Params;
+ Params.push_back(ASTIdTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()), false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+}
+
+void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ // Pointer to the personality function
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
+ true),
+ "__gnu_objc_personality_v0");
+ Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ llvm::Value *RethrowFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false), "_Unwind_Resume");
+
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow");
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // @synchronized()
+ if (!isTry) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncEnter, SyncArg);
+ }
+
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ // Emit the statements in the @try {} block
+ CGF.setInvokeDest(TryHandler);
+
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+
+ // Jump to @finally if there is no exception
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the handlers
+ CGF.EmitBlock(TryHandler);
+
+ // Get the correct versions of the exception handling intrinsics
+ llvm::Value *llvm_eh_exception =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+ llvm::Value *llvm_eh_typeid_for =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+ // Exception object
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ llvm::SmallVector<llvm::Value*, 8> ESelArgs;
+ llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
+
+ ESelArgs.push_back(Exc);
+ ESelArgs.push_back(Personality);
+
+ bool HasCatchAll = false;
+ // Only @try blocks may contain @catch blocks, but both @try and
+ // @synchronized get @finally handling
+ if (isTry) {
+ if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
+ CGF.setInvokeDest(CatchInCatch);
+
+ for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+ Handlers.push_back(std::make_pair(CatchDecl,
+ CatchStmt->getCatchBody()));
+
+ // @catch() and @catch(id) both catch any ObjC exception
+ if (!CatchDecl || CatchDecl->getType()->isObjCIdType()
+ || CatchDecl->getType()->isObjCQualifiedIdType()) {
+ // Use i8* null here to signal this is a catch all, not a cleanup.
+ ESelArgs.push_back(NULLPtr);
+ HasCatchAll = true;
+ // No further catches after this one will ever be reached
+ break;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT =
+ CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ llvm::Value *EHType =
+ MakeConstantString(IDecl->getNameAsString());
+ ESelArgs.push_back(EHType);
+ }
+ }
+ }
+
+ // We use a cleanup unless there was already a catch all.
+ if (!HasCatchAll) {
+ ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
+ Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ }
+
+ // Find which handler was matched.
+ llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector,
+ ESelArgs.begin(), ESelArgs.end(), "selector");
+
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+ const VarDecl *CatchParam = Handlers[i].first;
+ const Stmt *CatchBody = Handlers[i].second;
+
+ llvm::BasicBlock *Next = 0;
+
+ // The last handler always matches.
+ if (i + 1 != e) {
+ assert(CatchParam && "Only last handler can be a catch all.");
+
+ // Test whether this block matches the type for the selector and branch
+ // to Match if it does, or to the next BB if it doesn't.
+ llvm::BasicBlock *Match = CGF.createBasicBlock("match");
+ Next = CGF.createBasicBlock("catch.next");
+ llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for,
+ CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy));
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match,
+ Next);
+
+ CGF.EmitBlock(Match);
+ }
+
+ if (CatchBody) {
+ llvm::Value *ExcObject = CGF.Builder.CreateBitCast(Exc,
+ CGF.ConvertType(CatchParam->getType()));
+
+ // Bind the catch parameter if it exists.
+ if (CatchParam) {
+ // CatchParam is a ParmVarDecl because of the grammar
+ // construction used to handle this, but for codegen purposes
+ // we treat this as a local decl.
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.ObjCEHValueStack.push_back(ExcObject);
+ CGF.EmitStmt(CatchBody);
+ CGF.ObjCEHValueStack.pop_back();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ if (Next)
+ CGF.EmitBlock(Next);
+ } else {
+ assert(!Next && "catch-all should be the last handler.");
+
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+ // The @finally block is a secondary landing pad for any exceptions thrown in
+ // @catch() blocks
+ CGF.EmitBlock(CatchInCatch);
+ Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ ESelArgs.clear();
+ ESelArgs.push_back(Exc);
+ ESelArgs.push_back(Personality);
+ // If there is a @catch or @finally clause outside of this one, then we
+ // need to make sure that we catch and rethrow it.
+ if (PrevLandingPad) {
+ ESelArgs.push_back(NULLPtr);
+ } else {
+ ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
+ }
+ CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
+ "selector");
+ CGF.Builder.CreateCall(llvm_eh_typeid_for,
+ CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy));
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.setInvokeDest(PrevLandingPad);
+
+ CGF.EmitBlock(FinallyBlock);
+
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit 'objc_sync_exit(expr)' as finally's sole statement for
+ // @synchronized.
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncExit, SyncArg);
+ }
+
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ // Branch around the rethrow code.
+ CGF.EmitBranch(FinallyEnd);
+
+ CGF.EmitBlock(FinallyRethrow);
+
+ llvm::Value *ExceptionObject = CGF.Builder.CreateLoad(RethrowPtr);
+ llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+ if (!UnwindBB) {
+ CGF.Builder.CreateCall(RethrowFn, ExceptionObject);
+ // Exception always thrown, next instruction is never reached.
+ CGF.Builder.CreateUnreachable();
+ } else {
+ // If there is a @catch block outside this scope, we invoke instead of
+ // calling because we may return to this function. This is very slow, but
+ // some people still do it. It would be nice to add an optimised path for
+ // this.
+ CGF.Builder.CreateInvoke(RethrowFn, UnwindBB, UnwindBB, &ExceptionObject,
+ &ExceptionObject+1);
+ }
+
+ CGF.EmitBlock(FinallyEnd);
+}
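+
+// Rough shape of the control flow emitted above (a sketch; the labels match
+// the createBasicBlock() calls):
+//
+//   try --(normal)--> @finally --> finally.end
+//   try --(unwind)--> try.handler --> matched @catch --> @finally
+//   unmatched / rethrow --> @finally --> finally.throw --> call or invoke
+//   @catch body --(unwind)--> catch.rethrow --> @finally --> finally.throw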
+
+void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Value *ThrowFn =
+ CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ ExceptionAsObject = Exception;
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp");
+
+ // Note: This may have to be an invoke, if we want to support constructs like:
+ // @try {
+ // @throw(obj);
+ // }
+ // @catch(id) ...
+ //
+ // This is effectively turning @throw into an incredibly-expensive goto, but
+ // it may happen as a result of inlining followed by missed optimizations, or
+ // as a result of stupidity.
+ llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+ if (!UnwindBB) {
+ CGF.Builder.CreateCall(ThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateInvoke(ThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
+ &ExceptionAsObject+1);
+ }
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
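+
+// Note that the invoke above passes UnwindBB as both the normal and the
+// unwind destination: objc_exception_throw never returns, so the normal
+// edge is unreachable and only the unwind edge matters.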
+
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ CGBuilderTy B = CGF.Builder;
+ AddrWeakObj = EnforceType(B, AddrWeakObj, IdTy);
+ return B.CreateCall(WeakReadFn, AddrWeakObj);
+}
+
+void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(WeakAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(GlobalAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall3(IvarAssignFn, src, dst, ivarOffset);
+}
+
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(StrongCastAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ QualType Ty) {
+ CGBuilderTy B = CGF.Builder;
+ DestPtr = EnforceType(B, DestPtr, IdTy);
+ SrcPtr = EnforceType(B, SrcPtr, PtrToIdTy);
+
+ std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
+ unsigned long size = TypeInfo.first/8;
+ // FIXME: size_t
+ llvm::Value *N = llvm::ConstantInt::get(LongTy, size);
+
+ B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, N);
+}
+
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString();
+ // Emit the variable and initialize it with what we think the correct value
+ // is. This allows code compiled with non-fragile ivars to work correctly
+ // when linked against code which isn't (most of the time).
+ llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
+ if (!IvarOffsetPointer) {
+ uint64_t Offset;
+ if (ObjCImplementationDecl *OID =
+ CGM.getContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl *>(ID)))
+ Offset = ComputeIvarBaseOffset(CGM, OID, Ivar);
+ else
+ Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+
+ llvm::ConstantInt *OffsetGuess =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset, "ivar");
+ // Don't emit the guess in non-PIC code because the linker will not be able
+ // to replace it with the real version for a library. In non-PIC code you
+ // must compile with the fragile ABI if you want to use ivars from a
+ // GCC-compiled class.
+ if (CGM.getLangOptions().PICLevel) {
+ llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32Ty(VMContext), false,
+ llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ IvarOffsetGV, Name);
+ } else {
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32PtrTy(VMContext), false,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
+ }
+ return IvarOffsetPointer;
+}
+
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD) {
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ Context.ShallowCollectObjCIvars(OID, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (OIVD == Ivars[k])
+ return OID;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD);
+
+ return 0;
+}
+
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ if (CGM.getLangOptions().ObjCNonFragileABI) {
+ Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
+ return CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar"));
+ }
+ uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(LongTy, Offset, "ivar");
+}
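+
+// Note the two nested CreateLoads above: the non-fragile path first loads
+// the offset-pointer global emitted by ObjCIvarOffsetVariable(), then loads
+// the actual offset it points at, so the linker or runtime can redirect the
+// pointer without recompiling this module.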
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCGNU(CGM);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
new file mode 100644
index 0000000..d3bafd7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -0,0 +1,5910 @@
+//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the Apple runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+// Common CGObjCRuntime functions. These don't belong here, but they
+// don't belong in CGObjCRuntime either, so we will live with it for
+// now.
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCImplementationDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+
+ // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
+ // in here; it should never be necessary because that should be the lexical
+ // decl context for the ivar.
+
+ // If we have an implementation (and the ivar is in it), then look it
+ // up in the implementation layout.
+ const ASTRecordLayout *RL;
+ if (ID && ID->getClassInterface() == Container)
+ RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+ else
+ RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+
+ // Compute field index.
+ //
+ // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
+ // implemented. This should be fixed to get the information from the layout
+ // directly.
+ unsigned Index = 0;
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CGM.getContext().ShallowCollectObjCIvars(Container, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (Ivar == Ivars[k])
+ break;
+ ++Index;
+ }
+ assert(Index != Ivars.size() && "Ivar is not inside container!");
+
+ return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID, 0, Ivar) / 8;
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) / 8;
+}
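+
+// Both overloads simply convert the bit offset from LookupFieldBitOffset()
+// into a byte offset; the %8 remainder only matters for bit-field ivars and
+// is recovered separately in EmitValueForIvarAtOffset() below.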
+
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset) {
+ // Compute (type*) ( (char *) BaseValue + Offset)
+ const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ QualType IvarTy = Ivar->getType();
+ const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
+ V = CGF.Builder.CreateGEP(V, Offset, "add.ptr");
+ V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
+
+ Qualifiers Quals = CGF.MakeQualifiers(IvarTy);
+ Quals.addCVRQualifiers(CVRQualifiers);
+
+ if (!Ivar->isBitField())
+ return LValue::MakeAddr(V, Quals);
+
+ // We need to compute the bit offset for the bit-field; the offset above
+ // only gets us to the containing byte. Note, there is a subtle invariant
+ // here: this computation is only valid for non-synthesized ivars, yet we
+ // may be called for synthesized ivars. However, a synthesized ivar can
+ // never be a bit-field, so this is safe.
+ uint64_t BitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar) % 8;
+ uint64_t BitFieldSize =
+ Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
+
+ // Allocate a new CGBitFieldInfo object to describe this access.
+ //
+ // FIXME: This is incredibly wasteful, these should be uniqued or part of some
+ // layout object. However, this is blocked on other cleanups to the
+ // Objective-C code, so for now we just live with allocating a bunch of these
+ // objects.
+
+ // We always construct a single, possibly unaligned, access for this case.
+ CGBitFieldInfo::AccessInfo AI;
+ AI.FieldIndex = 0;
+ AI.FieldByteOffset = 0;
+ AI.FieldBitStart = BitOffset;
+ AI.AccessWidth = CGF.CGM.getContext().getTypeSize(IvarTy);
+ AI.AccessAlignment = 0;
+ AI.TargetBitOffset = 0;
+ AI.TargetBitWidth = BitFieldSize;
+
+ CGBitFieldInfo *Info =
+ new (CGF.CGM.getContext()) CGBitFieldInfo(BitFieldSize, 1, &AI,
+ IvarTy->isSignedIntegerType());
+
+ // FIXME: We need to set a very conservative alignment on this, or make sure
+ // that the runtime is doing the right thing.
+ return LValue::MakeBitfield(V, *Info, Quals.getCVRQualifiers());
+}
+
+///
+
+namespace {
+
+typedef std::vector<llvm::Constant*> ConstantVector;
+
+// FIXME: We should find a nicer way to make the labels for metadata; string
+// concatenation is lame.
+
+class ObjCCommonTypesHelper {
+protected:
+ llvm::LLVMContext &VMContext;
+
+private:
+ llvm::Constant *getMessageSendFn() const {
+ // id objc_msgSend (id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend");
+ }
+
+ llvm::Constant *getMessageSendStretFn() const {
+ // void objc_msgSend_stret (id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSend_stret");
+
+ }
+
+ llvm::Constant *getMessageSendFpretFn() const {
+ // FIXME: This should be long double on x86_64?
+ // [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
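+ // (The _fpret variant exists because, on x86-32, floating-point results
+ // come back in st(0); a plain objc_msgSend with a nil receiver would leave
+ // st(0) holding garbage, while objc_msgSend_fpret returns zero for nil.)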
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getDoubleTy(VMContext),
+ Params,
+ true),
+ "objc_msgSend_fpret");
+
+ }
+
+ llvm::Constant *getMessageSendSuperFn() const {
+ // id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+ const char *SuperName = "objc_msgSendSuper";
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ SuperName);
+ }
+
+ llvm::Constant *getMessageSendSuperFn2() const {
+ // id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ const char *SuperName = "objc_msgSendSuper2";
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ SuperName);
+ }
+
+ llvm::Constant *getMessageSendSuperStretFn() const {
+ // void objc_msgSendSuper_stret(void * stretAddr, struct objc_super *super,
+ // SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSendSuper_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperStretFn2() const {
+ // void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
+ // SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSendSuper2_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
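+ // (Presumably it works because a 'super' send always has a non-nil
+ // receiver, and the fpret messengers differ from the plain ones only in
+ // their handling of nil receivers.)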
+ return getMessageSendSuperFn();
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn2() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn2();
+ }
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+
+public:
+ const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ const llvm::Type *Int8PtrTy;
+
+ /// ObjectPtrTy - LLVM type for object handles (typeof(id))
+ const llvm::Type *ObjectPtrTy;
+
+ /// PtrObjectPtrTy - LLVM type for id *
+ const llvm::Type *PtrObjectPtrTy;
+
+ /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
+ const llvm::Type *SelectorPtrTy;
+ /// ExternalProtocolPtrTy - LLVM type for external protocol handles
+ /// (typeof(Protocol))
+ const llvm::Type *ExternalProtocolPtrTy;
+
+ // SuperCTy - clang type for struct objc_super.
+ QualType SuperCTy;
+ // SuperPtrCTy - clang type for struct objc_super *.
+ QualType SuperPtrCTy;
+
+ /// SuperTy - LLVM type for struct objc_super.
+ const llvm::StructType *SuperTy;
+ /// SuperPtrTy - LLVM type for struct objc_super *.
+ const llvm::Type *SuperPtrTy;
+
+ /// PropertyTy - LLVM type for struct objc_property (struct _prop_t
+ /// in GCC parlance).
+ const llvm::StructType *PropertyTy;
+
+ /// PropertyListTy - LLVM type for struct objc_property_list
+ /// (_prop_list_t in GCC parlance).
+ const llvm::StructType *PropertyListTy;
+ /// PropertyListPtrTy - LLVM type for struct objc_property_list*.
+ const llvm::Type *PropertyListPtrTy;
+
+ // MethodTy - LLVM type for struct objc_method.
+ const llvm::StructType *MethodTy;
+
+ /// CacheTy - LLVM type for struct objc_cache.
+ const llvm::Type *CacheTy;
+ /// CachePtrTy - LLVM type for struct objc_cache *.
+ const llvm::Type *CachePtrTy;
+
+ llvm::Constant *getGetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ llvm::SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
+ }
+
+ llvm::Constant *getSetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ llvm::SmallVector<CanQualType,6> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
+ }
+
+
+ llvm::Constant *getCopyStructFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_copyStruct (void *, const void *, size_t, bool, bool)
+ llvm::SmallVector<CanQualType,5> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
+ }
+
+ llvm::Constant *getEnumerationMutationFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_enumerationMutation (id)
+ llvm::SmallVector<CanQualType,1> Params;
+ Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+ }
+
+ /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
+ llvm::Constant *getGcReadWeakFn() {
+ // id objc_read_weak (id *)
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ }
+
+ /// GcAssignWeakFn -- LLVM objc_assign_weak function.
+ llvm::Constant *getGcAssignWeakFn() {
+ // id objc_assign_weak (id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ }
+
+ /// GcAssignGlobalFn -- LLVM objc_assign_global function.
+ llvm::Constant *getGcAssignGlobalFn() {
+ // id objc_assign_global(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ }
+
+ /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
+ llvm::Constant *getGcAssignIvarFn() {
+ // id objc_assign_ivar(id, id *, ptrdiff_t)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ Args.push_back(LongTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ }
+
+ /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
+ llvm::Constant *GcMemmoveCollectableFn() {
+ // void *objc_memmove_collectable(void *dst, const void *src, size_t size)
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+ Args.push_back(Int8PtrTy);
+ Args.push_back(LongTy);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ }
+
+ /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
+ llvm::Constant *getGcAssignStrongCastFn() {
+ // id objc_assign_strongCast(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ }
+
+ /// ExceptionThrowFn - LLVM objc_exception_throw function.
+ llvm::Constant *getExceptionThrowFn() {
+ // void objc_exception_throw(id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+ }
+
+ /// SyncEnterFn - LLVM objc_sync_enter function.
+ llvm::Constant *getSyncEnterFn() {
+ // void objc_sync_enter (id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ }
+
+ /// SyncExitFn - LLVM objc_sync_exit function.
+ llvm::Constant *getSyncExitFn() {
+ // void objc_sync_exit (id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ }
+
+ llvm::Constant *getSendFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendStretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendStretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendFpretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFpretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
+ }
+
+ ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCCommonTypesHelper(){}
+};
+
+/// ObjCTypesHelper - Helper class that encapsulates lazy
+/// construction of various types used during ObjC generation.
+class ObjCTypesHelper : public ObjCCommonTypesHelper {
+public:
+ /// SymtabTy - LLVM type for struct objc_symtab.
+ const llvm::StructType *SymtabTy;
+ /// SymtabPtrTy - LLVM type for struct objc_symtab *.
+ const llvm::Type *SymtabPtrTy;
+ /// ModuleTy - LLVM type for struct objc_module.
+ const llvm::StructType *ModuleTy;
+
+ /// ProtocolTy - LLVM type for struct objc_protocol.
+ const llvm::StructType *ProtocolTy;
+ /// ProtocolPtrTy - LLVM type for struct objc_protocol *.
+ const llvm::Type *ProtocolPtrTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension.
+ const llvm::StructType *ProtocolExtensionTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension *.
+ const llvm::Type *ProtocolExtensionPtrTy;
+ /// MethodDescriptionTy - LLVM type for struct
+ /// objc_method_description.
+ const llvm::StructType *MethodDescriptionTy;
+ /// MethodDescriptionListTy - LLVM type for struct
+ /// objc_method_description_list.
+ const llvm::StructType *MethodDescriptionListTy;
+ /// MethodDescriptionListPtrTy - LLVM type for struct
+ /// objc_method_description_list *.
+ const llvm::Type *MethodDescriptionListPtrTy;
+ /// ProtocolListTy - LLVM type for struct objc_protocol_list.
+ const llvm::Type *ProtocolListTy;
+ /// ProtocolListPtrTy - LLVM type for struct objc_protocol_list *.
+ const llvm::Type *ProtocolListPtrTy;
+ /// CategoryTy - LLVM type for struct objc_category.
+ const llvm::StructType *CategoryTy;
+ /// ClassTy - LLVM type for struct objc_class.
+ const llvm::StructType *ClassTy;
+ /// ClassPtrTy - LLVM type for struct objc_class *.
+ const llvm::Type *ClassPtrTy;
+ /// ClassExtensionTy - LLVM type for struct objc_class_ext.
+ const llvm::StructType *ClassExtensionTy;
+ /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
+ const llvm::Type *ClassExtensionPtrTy;
+ // IvarTy - LLVM type for struct objc_ivar.
+ const llvm::StructType *IvarTy;
+ /// IvarListTy - LLVM type for struct objc_ivar_list.
+ const llvm::Type *IvarListTy;
+ /// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
+ const llvm::Type *IvarListPtrTy;
+ /// MethodListTy - LLVM type for struct objc_method_list.
+ const llvm::Type *MethodListTy;
+ /// MethodListPtrTy - LLVM type for struct objc_method_list *.
+ const llvm::Type *MethodListPtrTy;
+
+ /// ExceptionDataTy - LLVM type for struct _objc_exception_data.
+ const llvm::Type *ExceptionDataTy;
+
+ /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
+ llvm::Constant *getExceptionTryEnterFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false),
+ "objc_exception_try_enter");
+ }
+
+ /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
+ llvm::Constant *getExceptionTryExitFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false),
+ "objc_exception_try_exit");
+ }
+
+ /// ExceptionExtractFn - LLVM objc_exception_extract function.
+ llvm::Constant *getExceptionExtractFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, false),
+ "objc_exception_extract");
+
+ }
+
+ /// ExceptionMatchFn - LLVM objc_exception_match function.
+ llvm::Constant *getExceptionMatchFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ClassPtrTy);
+ Params.push_back(ObjectPtrTy);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
+ Params, false),
+ "objc_exception_match");
+
+ }
+
+ /// SetJmpFn - LLVM _setjmp function.
+ llvm::Constant *getSetJmpFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::Type::getInt32PtrTy(VMContext));
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
+ Params, false),
+ "_setjmp");
+
+ }
+
+public:
+ ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCTypesHelper() {}
+};
+
+/// ObjCNonFragileABITypesHelper - holds all types needed by Objective-C's
+/// modern (non-fragile) ABI.
+class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
+public:
+
+ // MethodListnfABITy - LLVM for struct _method_list_t
+ const llvm::StructType *MethodListnfABITy;
+
+ // MethodListnfABIPtrTy - LLVM for struct _method_list_t*
+ const llvm::Type *MethodListnfABIPtrTy;
+
+ // ProtocolnfABITy - LLVM for struct _protocol_t
+ const llvm::StructType *ProtocolnfABITy;
+
+ // ProtocolnfABIPtrTy - LLVM for struct _protocol_t*
+ const llvm::Type *ProtocolnfABIPtrTy;
+
+ // ProtocolListnfABITy - LLVM for struct _objc_protocol_list
+ const llvm::StructType *ProtocolListnfABITy;
+
+ // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
+ const llvm::Type *ProtocolListnfABIPtrTy;
+
+ // ClassnfABITy - LLVM for struct _class_t
+ const llvm::StructType *ClassnfABITy;
+
+ // ClassnfABIPtrTy - LLVM for struct _class_t*
+ const llvm::Type *ClassnfABIPtrTy;
+
+ // IvarnfABITy - LLVM for struct _ivar_t
+ const llvm::StructType *IvarnfABITy;
+
+ // IvarListnfABITy - LLVM for struct _ivar_list_t
+ const llvm::StructType *IvarListnfABITy;
+
+ // IvarListnfABIPtrTy - LLVM for struct _ivar_list_t*
+ const llvm::Type *IvarListnfABIPtrTy;
+
+ // ClassRonfABITy - LLVM for struct _class_ro_t
+ const llvm::StructType *ClassRonfABITy;
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ const llvm::Type *ImpnfABITy;
+
+ // CategorynfABITy - LLVM for struct _category_t
+ const llvm::StructType *CategorynfABITy;
+
+ // New types for nonfragile abi messaging.
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+ const llvm::StructType *MessageRefTy;
+ // MessageRefCTy - clang type for struct _message_ref_t
+ QualType MessageRefCTy;
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ const llvm::Type *MessageRefPtrTy;
+ // MessageRefCPtrTy - clang type for struct _message_ref_t*
+ QualType MessageRefCPtrTy;
+
+ // MessengerTy - Type of the messenger (shown as IMP above)
+ const llvm::FunctionType *MessengerTy;
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ const llvm::StructType *SuperMessageRefTy;
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ const llvm::Type *SuperMessageRefPtrTy;
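+
+ // Illustrative call-site shape for the fixup dispatch (a sketch, not the
+ // exact IR the compiler emits): each send site gets a writable global
+ //   static struct _message_ref_t ref = { objc_msgSend_fixup, <selector> };
+ // and the message is sent through it, so the runtime can patch 'messenger':
+ //   ((id (*)(id, struct _message_ref_t *, ...))ref.messenger)(obj, &ref);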
+
+ llvm::Constant *getMessageSendFixupFn() {
+ // id objc_msgSend_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_fixup");
+ }
+
+ llvm::Constant *getMessageSendFpretFixupFn() {
+ // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_fpret_fixup");
+ }
+
+ llvm::Constant *getMessageSendStretFixupFn() {
+ // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_stret_fixup");
+ }
+
+ llvm::Constant *getMessageSendIdFixupFn() {
+ // id objc_msgSendId_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendId_fixup");
+ }
+
+ llvm::Constant *getMessageSendIdStretFixupFn() {
+ // id objc_msgSendId_stret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendId_stret_fixup");
+ }
+ llvm::Constant *getMessageSendSuper2FixupFn() {
+ // id objc_msgSendSuper2_fixup (struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SuperMessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendSuper2_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2StretFixupFn() {
+ // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SuperMessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendSuper2_stret_fixup");
+ }
+
+
+
+ /// EHPersonalityPtr - LLVM value for an i8* to the Objective-C
+ /// exception personality function.
+ llvm::Value *getEHPersonalityPtr() {
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
+ true),
+ "__objc_personality_v0");
+ return llvm::ConstantExpr::getBitCast(Personality, Int8PtrTy);
+ }
+
+ llvm::Constant *getUnwindResumeOrRethrowFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false),
+ (CGM.getLangOptions().SjLjExceptions ? "_Unwind_SjLj_Resume" :
+ "_Unwind_Resume_or_Rethrow"));
+ }
+
+ llvm::Constant *getObjCEndCatchFn() {
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false),
+ "objc_end_catch");
+
+ }
+
+ llvm::Constant *getObjCBeginCatchFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
+ Params, false),
+ "objc_begin_catch");
+ }
+
+ const llvm::StructType *EHTypeTy;
+ const llvm::Type *EHTypePtrTy;
+
+ ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCNonFragileABITypesHelper(){}
+};
+
+class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
+public:
+ // FIXME - accessibility
+ class GC_IVAR {
+ public:
+ unsigned ivar_bytepos;
+ unsigned ivar_size;
+ GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
+ : ivar_bytepos(bytepos), ivar_size(size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const GC_IVAR &b) const {
+ return ivar_bytepos < b.ivar_bytepos;
+ }
+ };
+
+ class SKIP_SCAN {
+ public:
+ unsigned skip;
+ unsigned scan;
+ SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
+ : skip(_skip), scan(_scan) {}
+ };
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+ llvm::LLVMContext &VMContext;
+ // FIXME! May not be needed after all.
+ unsigned ObjCABI;
+
+ // gc ivar layout bitmap calculation helper caches.
+ llvm::SmallVector<GC_IVAR, 16> SkipIvars;
+ llvm::SmallVector<GC_IVAR, 16> IvarsInfo;
+
+ /// LazySymbols - Symbols to generate a lazy reference for. See
+ /// DefinedSymbols and FinishModule().
+ llvm::SetVector<IdentifierInfo*> LazySymbols;
+
+ /// DefinedSymbols - External symbols which are defined by this
+ /// module. The symbols in this list and LazySymbols are used to add
+ /// special linker symbols which ensure that Objective-C modules are
+ /// linked properly.
+ llvm::SetVector<IdentifierInfo*> DefinedSymbols;
+
+ /// ClassNames - uniqued class names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames;
+
+ /// MethodVarNames - uniqued method variable names.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+
+ /// MethodVarTypes - uniqued method type signatures. We have to use
+ /// a StringMap here because we have no other unique reference.
+ llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
+
+ /// MethodDefinitions - map of methods which have been defined in
+ /// this translation unit.
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;
+
+ /// PropertyNames - uniqued property names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;
+
+ /// ClassReferences - uniqued class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;
+
+ /// SelectorReferences - uniqued selector references.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;
+
+ /// Protocols - Protocols for which an objc_protocol structure has
+ /// been emitted. Forward declarations are handled by creating an
+ /// empty structure whose initializer is filled in when/if defined.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;
+
+ /// DefinedProtocols - Protocols which have actually been
+ /// defined. We should not need this, see FIXME in GenerateProtocol.
+ llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
+
+ /// DefinedClasses - List of defined classes.
+ std::vector<llvm::GlobalValue*> DefinedClasses;
+
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ std::vector<llvm::GlobalValue*> DefinedNonLazyClasses;
+
+ /// DefinedCategories - List of defined categories.
+ std::vector<llvm::GlobalValue*> DefinedCategories;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ std::vector<llvm::GlobalValue*> DefinedNonLazyCategories;
+
+ /// GetNameForMethod - Return a name for the given method.
+ /// \param[out] NameOut - The return value.
+ void GetNameForMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ llvm::SmallVectorImpl<char> &NameOut);
+
+ /// GetMethodVarName - Return a unique constant for the given
+ /// selector's name. The return value has type char *.
+ llvm::Constant *GetMethodVarName(Selector Sel);
+ llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
+ llvm::Constant *GetMethodVarName(const std::string &Name);
+
+ /// GetMethodVarType - Return a unique constant for the given
+ /// method's type encoding string. The return value has type char *.
+
+ // FIXME: This is a horrible name.
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D);
+ llvm::Constant *GetMethodVarType(const FieldDecl *D);
+
+ /// GetPropertyName - Return a unique constant for the given
+ /// name. The return value has type char *.
+ llvm::Constant *GetPropertyName(IdentifierInfo *Ident);
+
+ // FIXME: This can be dropped once string functions are unified.
+ llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container);
+
+ /// GetClassName - Return a unique constant for the given class's
+ /// name. The return value has type char *.
+ llvm::Constant *GetClassName(IdentifierInfo *Ident);
+
+ /// BuildIvarLayout - Builds ivar layout bitmap for the class
+ /// implementation for the __strong or __weak case.
+ ///
+ llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
+ bool ForStrongLayout);
+
+ void BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+ void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+
+ /// GetIvarLayoutName - Returns a unique constant for the given
+ /// ivar layout bitmap.
+ llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitPropertyList - Emit the given property list. The return
+ /// value has type PropertyListPtrTy.
+ llvm::Constant *EmitPropertyList(llvm::Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// PushProtocolProperties - Push protocol's property on the input stack.
+ void PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ std::vector<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// GetProtocolRef - Return a reference to the internal protocol
+ /// description, creating an empty one if it has not been
+ /// defined. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// CreateMetadataVar - Create a global variable with internal
+ /// linkage for use by the Objective-C runtime.
+ ///
+ /// This is a convenience wrapper which not only creates the
+ /// variable, but also sets the section and alignment and adds the
+ /// global to the "llvm.used" list.
+ ///
+ /// \param Name - The variable name.
+ /// \param Init - The variable initializer; this is also used to
+ /// define the type of the variable.
+ /// \param Section - The section the variable should go into, or 0.
+ /// \param Align - The alignment for the variable, or 0.
+ /// \param AddToUsed - Whether the variable should be added to
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(llvm::Twine Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed);
+
+ CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *OMD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitImageInfo - Emit the image info marker used to encode some module
+ /// level information.
+ void EmitImageInfo();
+
+public:
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
+ CGM(cgm), VMContext(cgm.getLLVMContext()) { }
+
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD=0);
+
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+};
+
+class CGObjCMac : public CGObjCCommonMac {
+private:
+ ObjCTypesHelper ObjCTypes;
+
+ /// EmitModuleInfo - Another marker encoding module level
+ /// information.
+ void EmitModuleInfo();
+
+ /// EmitModuleSymbols - Emit module symbols, the list of defined
+ /// classes and categories. The result has type SymtabPtrTy.
+ llvm::Constant *EmitModuleSymbols();
+
+ /// FinishModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishModule();
+
+ /// EmitClassExtension - Generate the class extension structure used
+ /// to store the weak ivar layout and properties. The return value
+ /// has type ClassExtensionPtrTy.
+ llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitSuperClassRef - Emits reference to class's main metadata class.
+ llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs);
+
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass);
+
+ /// EmitMetaClassRef - Emit a forward reference to the class structure
+ /// for the metaclass of the given interface. The return value has
+ /// type ClassPtrTy.
+ llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClass - Emit a class structure for the metaclass of the
+ /// given implementation. The return value has type ClassPtrTy.
+ llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ const ConstantVector &Methods);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListPtrTy.
+ llvm::Constant *EmitMethodList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods);
+
+ /// EmitMethodDescList - Emit a method description list for a list of
+ /// method declarations.
+ ///  - Name: The name of the resulting metadata variable.
+ ///  - Section: The section the variable should be placed in.
+ ///  - Methods: The method description constants to emit.
+ ///
+ /// The return value has type MethodDescriptionListPtrTy.
+ llvm::Constant *EmitMethodDescList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolExtension - Generate the protocol extension
+ /// structure used to store optional instance and class methods, and
+ /// protocol properties. The return value has type
+ /// ProtocolExtensionPtrTy.
+ llvm::Constant *
+ EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ const ConstantVector &OptInstanceMethods,
+ const ConstantVector &OptClassMethods);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(llvm::Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+
+public:
+ CGObjCMac(CodeGen::CodeGenModule &cgm);
+
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method);
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetCopyStructFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ QualType Ty);
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+class CGObjCNonFragileABIMac : public CGObjCCommonMac {
+private:
+ ObjCNonFragileABITypesHelper ObjCTypes;
+ llvm::GlobalVariable* ObjCEmptyCacheVar;
+ llvm::GlobalVariable* ObjCEmptyVtableVar;
+
+ /// SuperClassReferences - uniqued super class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
+
+ /// MetaClassReferences - uniqued meta class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;
+
+ /// EHTypeReferences - uniqued class ehtype references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
+
+ /// NonLegacyDispatchMethods - List of methods for which we do *not* generate
+ /// legacy messaging dispatch.
+ llvm::DenseSet<Selector> NonLegacyDispatchMethods;
+
+ /// DefinedMetaClasses - List of defined meta-classes.
+ std::vector<llvm::GlobalValue*> DefinedMetaClasses;
+
+ /// LegacyDispatchedSelector - Returns true if SEL is not in the list of
+ /// NonLegacyDispatchMethods; false otherwise.
+ bool LegacyDispatchedSelector(Selector Sel);
+
+ /// FinishNonFragileABIModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishNonFragileABIModule();
+
+ /// AddModuleClassList - Add the given list of class pointers to the
+ /// module with the provided symbol and section names.
+ void AddModuleClassList(const std::vector<llvm::GlobalValue*> &Container,
+ const char *SymbolName,
+ const char *SectionName);
+
+ llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID);
+ llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListnfABITy.
+ llvm::Constant *EmitMethodList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods);
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListnfABIPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);
+
+ llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int offset);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(llvm::Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class reference.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given super class reference.
+ llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClassRef - Return a Value* for the address of the _class_t
+ /// metaclass metadata for the given interface.
+ llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+ /// the given ivar.
+ ///
+ llvm::GlobalVariable * ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+
+ /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
+ /// interface. The return value has type EHTypePtrTy.
+ llvm::Value *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition);
+
+ const char *getMetaclassSymbolPrefix() const {
+ return "OBJC_METACLASS_$_";
+ }
+
+ const char *getClassSymbolPrefix() const {
+ return "OBJC_CLASS_$_";
+ }
+
+ void GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize);
+
+ // Shamelessly stolen from Analysis/CFRefCount.cpp
+ Selector GetNullarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(0, &II);
+ }
+
+ Selector GetUnarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(1, &II);
+ }
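+ // e.g. GetNullarySelector("dealloc") yields @selector(dealloc), while
+ // GetUnarySelector("addObject") yields @selector(addObject:).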
+
+ /// ImplementationIsNonLazy - Check whether the given category or
+ /// class implementation is "non-lazy".
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;
+
+public:
+ CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
+ // FIXME. All stubs for now!
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel)
+ { return EmitSelector(Builder, Sel); }
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method)
+ { return EmitSelector(Builder, Method->getSelector()); }
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+ }
+ virtual llvm::Constant *GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+ }
+
+ virtual llvm::Constant *GetCopyStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+
+ virtual llvm::Constant *EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+ }
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ QualType Ty);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+} // end anonymous namespace
+
+/* *** Helper Functions *** */
+
+/// getConstantGEP() - Help routine to construct simple GEPs.
+static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext,
+ llvm::Constant *C,
+ unsigned idx0,
+ unsigned idx1) {
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1)
+ };
+ return llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2);
+}
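+
+// For example, getConstantGEP(VMContext, Str, 0, 0) on a global character
+// array Str yields the familiar constant 'char *' to its first element.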
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Context, Super);
+ return false;
+}
+
+/* *** CGObjCMac Public Interface *** */
+
+CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCABI = 1;
+ EmitImageInfo();
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(Builder, ID);
+}
+
+/// GetSelector - Return the pointer to the unique'd string for this selector.
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+ return EmitSelector(Builder, Sel);
+}
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+ return EmitSelector(Builder, Method->getSelector());
+}
+
+/// Generate a constant CFString object.
+/*
+ struct __builtin_CFString {
+ const int *isa; // point to __CFConstantStringClassReference
+ int flags;
+ const char *str;
+ long length;
+ };
+*/
+
+/// or Generate a constant NSString object.
+/*
+ struct __builtin_NSString {
+ const int *isa; // point to __NSConstantStringClassReference
+ const char *str;
+ unsigned int length;
+ };
+*/
+
+llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+ const StringLiteral *SL) {
+ return (CGM.getLangOptions().NoConstantCFStrings == 0 ?
+ CGM.GetAddrOfConstantCFString(SL) :
+ CGM.GetAddrOfConstantNSString(SL));
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
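+ // Conceptually (a C-level sketch of what is built below):
+ //   struct objc_super s = { receiver, <target class> };
+ //   objc_msgSendSuper(&s, selector, args...);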
+ llvm::Value *ObjCSuper =
+ CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+ // A message sent to 'super' in a class method defined in a category
+ // implementation requires special treatment.
+ // If we are in a class method, we must retrieve the
+ // _metaclass_ for the current class, pointed at by
+ // the class's "isa" pointer. The following assumes that
+ // "isa" is the first ivar in a class (which it must be).
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ } else {
+ llvm::Value *MetaClassPtr = EmitMetaClassRef(Class);
+ llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1);
+ llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+ Target = Super;
+ }
+ }
+ else if (isCategoryImpl)
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ else {
+ llvm::Value *ClassPtr = EmitSuperClassRef(Class);
+ ClassPtr = CGF.Builder.CreateStructGEP(ClassPtr, 1);
+ Target = CGF.Builder.CreateLoad(ClassPtr);
+ }
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ const llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+ return EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes);
+}
+
+/// Generate code for a message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes);
+}
+
+CodeGen::RValue
+CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ CallArgList ActualArgs;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+ ActualArgs.push_back(std::make_pair(RValue::get(Sel),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+
+ llvm::Constant *Fn = NULL;
+ if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+ : ObjCTypes.getSendStretFn(IsSuper);
+ } else if (ResultType->isFloatingType()) {
+ if (ObjCABI == 2) {
+ if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+ BuiltinType::Kind k = BT->getKind();
+ Fn = (k == BuiltinType::LongDouble) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFn2(IsSuper);
+ } else {
+ Fn = ObjCTypes.getSendFn2(IsSuper);
+ }
+ } else
+ // FIXME. This currently matches gcc's API for x86-32. May need to change
+ // for others if we have their API.
+ Fn = ObjCTypes.getSendFpretFn(IsSuper);
+ } else {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+ : ObjCTypes.getSendFn(IsSuper);
+ }
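+ // To recap the selection above: struct returns use the _stret messengers;
+ // floating-point returns use _fpret (always on the fragile ABI, only for
+ // long double on the non-fragile ABI); everything else uses the plain
+ // messengers (or their Super variants).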
+ assert(Fn && "EmitLegacyMessageSend - unknown API");
+ Fn = llvm::ConstantExpr::getBitCast(Fn,
+ llvm::PointerType::getUnqual(FTy));
+ return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
+}
+
+llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ // FIXME: I don't understand why gcc generates this, or where it is
+ // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
+ ObjCTypes.ExternalProtocolPtrTy);
+}
+
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ // FIXME: We shouldn't need this, the protocol decl should contain enough
+ // information to tell us whether this was a declaration or a definition.
+ DefinedProtocols.insert(PD->getIdentifier());
+
+ // If we have generated a forward reference to this protocol, emit
+ // it now. Otherwise do nothing, the protocol objects are lazily
+ // emitted.
+ if (Protocols.count(PD->getIdentifier()))
+ GetOrEmitProtocol(PD);
+}
+
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+ if (DefinedProtocols.count(PD->getIdentifier()))
+ return GetOrEmitProtocol(PD);
+ return GetOrEmitProtocolRef(PD);
+}
+
+/*
+// APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+struct _objc_protocol {
+  struct _objc_protocol_extension *isa;
+  char *protocol_name;
+  struct _objc_protocol_list *protocol_list;
+  struct _objc__method_prototype_list *instance_methods;
+  struct _objc__method_prototype_list *class_methods;
+};
+
+See EmitProtocolExtension().
+*/
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // FIXME: I don't understand why gcc generates this, or where it is
+ // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ } else {
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ } else {
+ ClassMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] =
+ EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+ Values[3] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[4] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+ }
+ CGM.AddUsedGlobal(Entry);
+
+ return Entry;
+}
+
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+ }
+
+ return Entry;
+}
+
+/*
+ struct _objc_protocol_extension {
+ uint32_t size;
+ struct objc_method_description_list *optional_instance_methods;
+ struct objc_method_description_list *optional_class_methods;
+ struct objc_property_list *instance_properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ const ConstantVector &OptInstanceMethods,
+ const ConstantVector &OptClassMethods) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ std::vector<llvm::Constant*> Values(4);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods);
+ Values[2] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods);
+ Values[3] = EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(),
+ 0, PD, ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
+ Values[3]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+ // No special section, but goes in llvm.used
+ return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getName(),
+ Init,
+ 0, 0, true);
+}
+
+/*
+ struct objc_protocol_list {
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolList(llvm::Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<llvm::Constant*> ProtocolRefs;
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin));
+
+  // Just return null for empty protocol lists.
+ if (ProtocolRefs.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+ std::vector<llvm::Constant*> Values(3);
+ // This field is only used by the runtime.
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
+ ProtocolRefs.size() - 1);
+ Values[2] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ 4, false);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
+
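+/// PushProtocolProperties - Collect the property metadata declared by PROTO
+/// and, recursively, by every protocol it inherits, skipping any property
+/// whose name is already in PropertySet.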
+void CGObjCCommonMac::PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ std::vector<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ std::vector<llvm::Constant*> Prop(2);
+ for (ObjCProtocolDecl::protocol_iterator P = PROTO->protocol_begin(),
+ E = PROTO->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
+ for (ObjCContainerDecl::prop_iterator I = PROTO->prop_begin(),
+ E = PROTO->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ if (!PropertySet.insert(PD->getIdentifier()))
+ continue;
+ Prop[0] = GetPropertyName(PD->getIdentifier());
+ Prop[1] = GetPropertyTypeString(PD, Container);
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ }
+}
+
+/*
+ struct _objc_property {
+ const char * const name;
+ const char * const attributes;
+ };
+
+ struct _objc_property_list {
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
+ };
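+
+  For example (illustrative only), a declaration such as
+    @property (nonatomic, copy) NSString *title;
+  backed by an ivar named 'title' yields roughly the entry
+    name = "title", attributes = T@"NSString",C,N,Vtitle
+  (see GetPropertyTypeString for the exact attribute encoding).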
+*/
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ std::vector<llvm::Constant*> Properties, Prop(2);
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+ for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(),
+ E = OCD->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ PropertySet.insert(PD->getIdentifier());
+ Prop[0] = GetPropertyName(PD->getIdentifier());
+ Prop[1] = GetPropertyTypeString(PD, Container);
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+ Prop));
+ }
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+ for (ObjCInterfaceDecl::protocol_iterator P = OID->protocol_begin(),
+ E = OID->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
+
+ // Return null for empty list.
+ if (Properties.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
+ unsigned PropertySize =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+ Properties.size());
+ Values[2] = llvm::ConstantArray::get(AT, Properties);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" :
+ "__OBJC,__property,regular,no_dead_strip",
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
+
+/*
+ struct objc_method_description_list {
+ int count;
+ struct objc_method_description list[];
+ };
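+
+  Each entry pairs a selector name with its type encoding. For example
+  (illustrative, 32-bit encoding): a method -(void)invalidate is emitted
+  as roughly { "invalidate", "v8@0:4" }.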
+*/
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ std::vector<llvm::Constant*> Desc(2);
+ Desc[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
+ Desc);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodDescList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
+ Methods.size());
+ Values[1] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+}
+
+/*
+ struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ uint32_t size; // <rdar://4585769>
+ struct _objc_property_list *instance_properties;
+ };
+*/
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+  // FIXME: This is poor design; the OCD should have a pointer to the category
+  // decl. Additionally, note that Category can be null for the @implementation
+  // without an @interface case. Sema should just create one for us as it does
+  // for @implementation so everyone else can live life under a clear blue sky.
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+
+ llvm::SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
+ << OCD->getName();
+
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ std::vector<llvm::Constant*> Values(7);
+ Values[0] = GetClassName(OCD->getIdentifier());
+ Values[1] = GetClassName(Interface->getIdentifier());
+ LazySymbols.insert(Interface->getIdentifier());
+ Values[2] =
+ EmitMethodList("\01L_OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] =
+ EmitMethodList("\01L_OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ if (Category) {
+ Values[4] =
+ EmitProtocolList("\01L_OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ }
+ Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+ // If there is no category @interface then there can be no properties.
+ if (Category) {
+ Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_CATEGORY_" + ExtName.str(), Init,
+ "__OBJC,__category,regular,no_dead_strip",
+ 4, true);
+ DefinedCategories.push_back(GV);
+}
+
+// FIXME: Get from somewhere?
+enum ClassFlags {
+ eClassFlags_Factory = 0x00001,
+ eClassFlags_Meta = 0x00002,
+  // <rdar://5142207>
+ eClassFlags_HasCXXStructors = 0x02000,
+ eClassFlags_Hidden = 0x20000,
+ eClassFlags_ABI2_Hidden = 0x00010,
+  eClassFlags_ABI2_HasCXXStructors = 0x00004 // <rdar://4923634>
+};
+
+/*
+ struct _objc_class {
+ Class isa;
+ Class super_class;
+ const char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct _objc_cache *cache;
+ struct _objc_protocol_list *protocols;
+  // Objective-C 1.0 extensions (<rdar://4585769>)
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+
+ See EmitClassExtension();
+*/
+void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ DefinedSymbols.insert(ID->getIdentifier());
+
+ std::string ClassName = ID->getNameAsString();
+ // FIXME: Gross
+ ObjCInterfaceDecl *Interface =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ llvm::Constant *Protocols =
+ EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
+ Interface->protocol_begin(),
+ Interface->protocol_end());
+ unsigned Flags = eClassFlags_Factory;
+ if (ID->getNumIvarInitializers())
+ Flags |= eClassFlags_HasCXXStructors;
+ unsigned Size =
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;
+
+ // FIXME: Set CXX-structors flag.
+ if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ Flags |= eClassFlags_Hidden;
+
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(12);
+ Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
+ // Record a reference to the super class.
+ LazySymbols.insert(Super->getIdentifier());
+
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, false);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getName(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ Values[10] = BuildIvarLayout(ID, true);
+ Values[11] = EmitClassExtension(ID);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+ std::string Name("\01L_OBJC_CLASS_");
+ Name += ClassName;
+ const char *Section = "__OBJC,__class,regular,no_dead_strip";
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ GV->setSection(Section);
+ GV->setAlignment(4);
+ CGM.AddUsedGlobal(GV);
+ }
+ else
+ GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ DefinedClasses.push_back(GV);
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ const ConstantVector &Methods) {
+ unsigned Flags = eClassFlags_Meta;
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+
+ if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ Flags |= eClassFlags_Hidden;
+
+ std::vector<llvm::Constant*> Values(12);
+ // The isa for the metaclass is the root of the hierarchy.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ Values[ 0] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Root->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ // The super class for the metaclass is emitted as the name of the
+ // super class. The runtime fixes this up to point to the
+ // *metaclass* for the super class.
+ if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, true);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip",
+ Methods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ // ivar_layout for metaclass is always NULL.
+ Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ // The class extension is always unused for metaclasses.
+ Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ std::string Name("\01L_OBJC_METACLASS_");
+ Name += ID->getNameAsCString();
+
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ } else {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name);
+ }
+ GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
+ GV->setAlignment(4);
+ CGM.AddUsedGlobal(GV);
+
+ return GV;
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+
+  // FIXME: Should we look these up somewhere other than the module? It's a bit
+  // silly since we only generate these while processing an implementation, so
+  // exactly one pointer would work if we knew when we entered/exited an
+  // implementation block.
+
+ // Check for an existing forward reference.
+  // A metaclass with internal linkage may already have been defined;
+  // pass 'true' as the second argument so that it is returned as well.
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
+ true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ return GV;
+ } else {
+ // Generate as an external reference to keep a consistent
+ // module. This will be patched up when we emit the metaclass.
+ return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ }
+}
+
+llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_CLASS_" + ID->getNameAsString();
+
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
+ true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward class metadata reference has incorrect type.");
+ return GV;
+ } else {
+ return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ }
+}
+
+/*
+ struct objc_class_ext {
+ uint32_t size;
+ const char *weak_ivar_layout;
+ struct _objc_property_list *properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = BuildIvarLayout(ID, false);
+ Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
+ return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getName(),
+ Init, "__OBJC,__class_ext,regular,no_dead_strip",
+ 4, true);
+}
+
+/*
+ struct objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+
+ struct objc_ivar_list {
+ int ivar_count;
+ struct objc_ivar list[count];
+ };
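+
+  For example (illustrative): an ivar 'unsigned int _count' at offset 8 is
+  emitted as the entry { "_count", "I", 8 }.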
+*/
+llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass) {
+ std::vector<llvm::Constant*> Ivars, Ivar(3);
+
+ // When emitting the root class GCC emits ivar entries for the
+ // actual class structure. It is not clear if we need to follow this
+  // behavior; for now let's try to get away with not doing it. If so,
+ // the cleanest solution would be to make up an ObjCInterfaceDecl
+ // for the class.
+ if (ForClass)
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ ObjCInterfaceDecl *OID =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ Ivar[0] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[1] = GetMethodVarType(IVD);
+ Ivar[2] = llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD));
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+ }
+
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
+ Ivars.size());
+ Values[1] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV;
+ if (ForClass)
+ GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getName(),
+ Init, "__OBJC,__class_vars,regular,no_dead_strip",
+ 4, true);
+ else
+ GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_" + ID->getName(),
+ Init, "__OBJC,__instance_vars,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+}
+
+/*
+ struct objc_method {
+ SEL method_name;
+ char *method_types;
+ void *method;
+ };
+
+ struct objc_method_list {
+ struct objc_method_list *obsolete;
+ int count;
+ struct objc_method methods_list[count];
+ };
+*/
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
+ // FIXME: Use DenseMap::lookup
+ llvm::Function *Fn = MethodDefinitions[MD];
+ if (!Fn)
+ return 0;
+
+ std::vector<llvm::Constant*> Method(3);
+ Method[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Method[1] = GetMethodVarType(MD);
+ Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodListPtrTy);
+}
+
+llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ llvm::SmallString<256> Name;
+ GetNameForMethod(OMD, CD, Name);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ llvm::Function *Method =
+ llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ Name.str(),
+ &CGM.getModule());
+ MethodDefinitions.insert(std::make_pair(OMD, Method));
+
+ return Method;
+}
+
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateMetadataVar(llvm::Twine Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed) {
+ const llvm::Type *Ty = Init->getType();
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Ty, false,
+ llvm::GlobalValue::InternalLinkage, Init, Name);
+ if (Section)
+ GV->setSection(Section);
+ if (Align)
+ GV->setAlignment(Align);
+ if (AddToUsed)
+ CGM.AddUsedGlobal(GV);
+ return GV;
+}
+
+llvm::Function *CGObjCMac::ModuleInitFunction() {
+ // Abuse this interface function as a place to finalize.
+ FinishModule();
+ return NULL;
+}
+
+llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetCopyStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+
+llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+}
+
+/*
+
+ Objective-C setjmp-longjmp (sjlj) Exception Handling
+ --
+
+ The basic framework for a @try-catch-finally is as follows:
+ {
+ objc_exception_data d;
+ id _rethrow = null;
+ bool _call_try_exit = true;
+
+ objc_exception_try_enter(&d);
+ if (!setjmp(d.jmp_buf)) {
+ ... try body ...
+ } else {
+ // exception path
+ id _caught = objc_exception_extract(&d);
+
+ // enter new try scope for handlers
+ if (!setjmp(d.jmp_buf)) {
+ ... match exception and execute catch blocks ...
+
+ // fell off end, rethrow.
+ _rethrow = _caught;
+ ... jump-through-finally to finally_rethrow ...
+ } else {
+ // exception in catch block
+ _rethrow = objc_exception_extract(&d);
+ _call_try_exit = false;
+ ... jump-through-finally to finally_rethrow ...
+ }
+ }
+ ... jump-through-finally to finally_end ...
+
+ finally:
+ if (_call_try_exit)
+ objc_exception_try_exit(&d);
+
+ ... finally block ....
+ ... dispatch to finally destination ...
+
+ finally_rethrow:
+ objc_exception_throw(_rethrow);
+
+ finally_end:
+ }
+
+ This framework differs slightly from the one gcc uses, in that gcc
+ uses _rethrow to determine if objc_exception_try_exit should be called
+ and if the object should be rethrown. This breaks in the face of
+ throwing nil and introduces unnecessary branches.
+
+ We specialize this framework for a few particular circumstances:
+
+ - If there are no catch blocks, then we avoid emitting the second
+ exception handling context.
+
+ - If there is a catch-all catch block (i.e. @catch(...) or @catch(id
+ e)) we avoid emitting the code to rethrow an uncaught exception.
+
+ - FIXME: If there is no @finally block we can do a few more
+ simplifications.
+
+ Rethrows and Jumps-Through-Finally
+ --
+
+ Support for implicit rethrows and jumping through the finally block is
+ handled by storing the current exception-handling context in
+ ObjCEHStack.
+
+ In order to implement proper @finally semantics, we support one basic
+ mechanism for jumping through the finally block to an arbitrary
+ destination. Constructs which generate exits from a @try or @catch
+ block use this mechanism to implement the proper semantics by chaining
+ jumps, as necessary.
+
+ This mechanism works like the one used for indirect goto: we
+ arbitrarily assign an ID to each destination and store the ID for the
+ destination in a variable prior to entering the finally block. At the
+ end of the finally block we simply create a switch to the proper
+ destination.
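+
+  As an illustrative sketch (not literal compiler output), an exit such as
+  a 'return' inside the @try is lowered roughly as:
+
+    dest = 2;            // ID assigned to the function's return block
+    goto finally;
+
+  and the finally block then ends with:
+
+    switch (dest) {
+    case 1: goto finally_end;
+    case 2: goto return_block;
+    }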
+
+ Code gen for @synchronized(expr) stmt;
+ Effectively generating code for:
+ objc_sync_enter(expr);
+ @try stmt @finally { objc_sync_exit(expr); }
+*/
+
+void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ // Create various blocks we refer to for handling @finally.
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyExit = CGF.createBasicBlock("finally.exit");
+ llvm::BasicBlock *FinallyNoExit = CGF.createBasicBlock("finally.noexit");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can safely avoid a temp here because jumps into
+  // @synchronized are illegal, and this value will dominate its uses.
+ llvm::Value *SyncArg = 0;
+ if (!isTry) {
+ SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ }
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ if (CGF.ObjCEHValueStack.empty())
+ CGF.ObjCEHValueStack.push_back(0);
+  // If this is a nested @try, the caught exception is that of the enclosing @try.
+ else
+ CGF.ObjCEHValueStack.push_back(CGF.ObjCEHValueStack.back());
+ // Allocate memory for the exception data and rethrow pointer.
+ llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ "exceptiondata.ptr");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy,
+ "_rethrow");
+ llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca(
+ llvm::Type::getInt1Ty(VMContext),
+ "_call_try_exit");
+ CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext),
+ CallTryExitPtr);
+
+ // Enter a new try block and call setjmp.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+ llvm::Value *JmpBufPtr = CGF.Builder.CreateStructGEP(ExceptionData, 0,
+ "jmpbufarray");
+ JmpBufPtr = CGF.Builder.CreateStructGEP(JmpBufPtr, 0, "tmp");
+ llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
+ JmpBufPtr, "result");
+
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"),
+ TryHandler, TryBlock);
+
+ // Emit the @try block.
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the "exception in @try" block.
+ CGF.EmitBlock(TryHandler);
+
+ // Retrieve the exception object. We may emit multiple blocks but
+ // nothing can cross this so the value is already in SSA form.
+ llvm::Value *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ CGF.ObjCEHValueStack.back() = Caught;
+ if (!isTry) {
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
+ CallTryExitPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ } else if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
+
+ // Enter a new exception try block (in case a @catch block throws
+ // an exception).
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+
+ llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
+ JmpBufPtr, "result");
+ llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "threw");
+
+ llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch");
+ llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch.handler");
+ CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+
+ CGF.EmitBlock(CatchBlock);
+
+ // Handle catch list. As a special case we check if everything is
+ // matched and avoid generating code for falling off the end if
+ // so.
+ bool AllMatched = false;
+ for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch");
+
+ const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
+ const ObjCObjectPointerType *OPT = 0;
+
+ // catch(...) always matches.
+ if (!CatchParam) {
+ AllMatched = true;
+ } else {
+ OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();
+
+ // catch(id e) always matches.
+ // FIXME: For the time being we also match id<X>; this should
+ // be rejected by Sema instead.
+ if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
+ AllMatched = true;
+ }
+
+ if (AllMatched) {
+ if (CatchParam) {
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+ CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+ break;
+ }
+
+ assert(OPT && "Unexpected non-object pointer type in @catch");
+ const ObjCObjectType *ObjTy = OPT->getObjectType();
+ ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
+ assert(IDecl && "Catch parameter must have Objective-C type!");
+
+ // Check if the @catch block matches the exception object.
+ llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl);
+
+ llvm::Value *Match =
+ CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
+ Class, Caught, "match");
+
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("matched");
+
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
+ MatchedBlock, NextCatchBlock);
+
+ // Emit the @catch block.
+ CGF.EmitBlock(MatchedBlock);
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ llvm::Value *Tmp =
+ CGF.Builder.CreateBitCast(Caught,
+ CGF.ConvertType(CatchParam->getType()),
+ "tmp");
+ CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(NextCatchBlock);
+ }
+
+ if (!AllMatched) {
+ // None of the handlers caught the exception, so store it to be
+ // rethrown at the end of the @finally block.
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+
+ // Emit the exception handler for the @catch blocks.
+ CGF.EmitBlock(CatchHandler);
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData),
+ RethrowPtr);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
+ CallTryExitPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ } else {
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
+ CallTryExitPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+
+ // Pop the exception-handling stack entry. It is important to do
+ // this now, because the code in the @finally block is not in this
+ // context.
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.ObjCEHValueStack.pop_back();
+
+ // Emit the @finally block.
+ CGF.EmitBlock(FinallyBlock);
+ llvm::Value* CallTryExit = CGF.Builder.CreateLoad(CallTryExitPtr, "tmp");
+
+ CGF.Builder.CreateCondBr(CallTryExit, FinallyExit, FinallyNoExit);
+
+ CGF.EmitBlock(FinallyExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData);
+
+ CGF.EmitBlock(FinallyNoExit);
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ }
+
+ // Emit the switch block
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ CGF.EmitBlock(FinallyRethrow);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(FinallyEnd);
+}
+
+void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ const llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
+ ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
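+/// For example (illustrative source, GC mode): the assignment in
+///   __weak id w; w = newObject;
+/// is lowered to the call objc_assign_weak(newObject, &w).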
+///
+void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ return;
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset)
+///
+void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, ivarOffset);
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
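+/// EmitGCMemmoveCollectable - Code gen for copying an aggregate that
+/// contains __strong pointers, via the runtime entry point returned by
+/// GcMemmoveCollectableFn (objc_memmove_collectable(dst, src, size)).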
+void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ QualType Ty) {
+ // Get size info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
+ unsigned long size = TypeInfo.first/8;
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size);
+ CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+ DestPtr, SrcPtr, N);
+ return;
+}
+
+/// EmitObjCValueForIvar - Code Gen for ivar reference.
+///
+LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy),
+ Offset);
+}
+
+/* *** Private Interface *** */
+
+/// EmitImageInfo - Emit the image info marker used to encode some module
+/// level information.
+///
+/// See: <rdar://4810609&4810587&4810587>
+/// struct IMAGE_INFO {
+/// unsigned version;
+/// unsigned flags;
+/// };
+enum ImageInfoFlags {
+ eImageInfo_FixAndContinue = (1 << 0),
+ eImageInfo_GarbageCollected = (1 << 1),
+ eImageInfo_GCOnly = (1 << 2),
+  eImageInfo_OptimizedByDyld = (1 << 3), // FIXME: When is this set?
+
+ // A flag indicating that the module has no instances of a @synthesize of a
+ // superclass variable. <rdar://problem/6803242>
+ eImageInfo_CorrectedSynthesize = (1 << 4)
+};
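+
+// For example, compiling with -fobjc-gc-only sets flags to
+// eImageInfo_GarbageCollected | eImageInfo_GCOnly |
+// eImageInfo_CorrectedSynthesize, i.e. 0x16 (see EmitImageInfo below).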
+
+void CGObjCCommonMac::EmitImageInfo() {
+ unsigned version = 0; // Version is unused?
+ unsigned flags = 0;
+
+ // FIXME: Fix and continue?
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+ flags |= eImageInfo_GarbageCollected;
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ flags |= eImageInfo_GCOnly;
+
+ // We never allow @synthesize of a superclass property.
+ flags |= eImageInfo_CorrectedSynthesize;
+
+ // Emitted as int[2];
+ llvm::Constant *values[2] = {
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), version),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags)
+ };
+ llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), 2);
+
+ const char *Section;
+ if (ObjCABI == 1)
+ Section = "__OBJC, __image_info,regular";
+ else
+ Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
+ llvm::ConstantArray::get(AT, values, 2),
+ Section,
+ 0,
+ true);
+ GV->setConstant(true);
+}
+
+
+// struct objc_module {
+// unsigned long version;
+// unsigned long size;
+// const char *name;
+// Symtab symtab;
+// };
+
+// FIXME: Get from somewhere
+static const int ModuleVersion = 7;
+
+void CGObjCMac::EmitModuleInfo() {
+ uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+
+ std::vector<llvm::Constant*> Values(4);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+  // This used to be the filename; it is now unused. <rdar://4327263>
+ Values[2] = GetClassName(&CGM.getContext().Idents.get(""));
+ Values[3] = EmitModuleSymbols();
+ CreateMetadataVar("\01L_OBJC_MODULES",
+ llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ "__OBJC,__module_info,regular,no_dead_strip",
+ 4, true);
+}
+
+llvm::Constant *CGObjCMac::EmitModuleSymbols() {
+ unsigned NumClasses = DefinedClasses.size();
+ unsigned NumCategories = DefinedCategories.size();
+
+ // Return null if no symbols were defined.
+ if (!NumClasses && !NumCategories)
+ return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
+ Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
+ Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+
+ // The runtime expects exactly the list of defined classes followed
+ // by the list of defined categories, in a single array.
+ std::vector<llvm::Constant*> Symbols(NumClasses + NumCategories);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
+ ObjCTypes.Int8PtrTy);
+ for (unsigned i=0; i<NumCategories; i++)
+ Symbols[NumClasses + i] =
+ llvm::ConstantExpr::getBitCast(DefinedCategories[i],
+ ObjCTypes.Int8PtrTy);
+
+ Values[4] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ NumClasses + NumCategories),
+ Symbols);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
+ "__OBJC,__symbols,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+}
+
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ LazySymbols.insert(ID->getIdentifier());
+
+ llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetClassName(ID->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = ClassNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantArray::get(VMContext,
+ Ident->getNameStart()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+/// GetIvarLayoutName - Returns a unique constant for the given
+/// ivar layout bitmap.
+llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+}
+
+static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
+ if (FQT.isObjCGCStrong())
+ return Qualifiers::Strong;
+
+ if (FQT.isObjCGCWeak())
+ return Qualifiers::Weak;
+
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return Qualifiers::Strong;
+
+ if (const PointerType *PT = FQT->getAs<PointerType>())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+
+ return Qualifiers::GCNone;
+}
+
+void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos,
+ bool ForStrongLayout,
+ bool &HasUnion) {
+ const RecordDecl *RD = RT->getDecl();
+ // FIXME - Use iterator.
+ llvm::SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildAggrIvarLayout(0, RecLayout, RD, Fields, BytePos,
+ ForStrongLayout, HasUnion);
+}
+
+void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion) {
+ bool IsUnion = (RD && RD->isUnion());
+ uint64_t MaxUnionIvarSize = 0;
+ uint64_t MaxSkippedUnionIvarSize = 0;
+ FieldDecl *MaxField = 0;
+ FieldDecl *MaxSkippedField = 0;
+ FieldDecl *LastFieldBitfield = 0;
+ uint64_t MaxFieldOffset = 0;
+ uint64_t MaxSkippedFieldOffset = 0;
+ uint64_t LastBitfieldOffset = 0;
+
+ if (RecFields.empty())
+ return;
+ unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ FieldDecl *Field = RecFields[i];
+ uint64_t FieldOffset;
+ if (RD) {
+ // Note that 'i' here is actually the field index inside RD of Field,
+ // although this dependency is hidden.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ FieldOffset = RL.getFieldOffset(i) / 8;
+ } else
+ FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
+
+    // Skip over unnamed fields and bitfields.
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfield = Field;
+ LastBitfieldOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfield = 0;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildAggrIvarRecordLayout(FQT->getAs<RecordType>(),
+ BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+      // Check CArray before dereferencing it below.
+      assert(CArray && "only array with known element size is supported");
+      uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+
+ assert(!FQT->isUnionType() &&
+ "layout for array of unions not supported");
+ if (FQT->isRecordType()) {
+ int OldIndex = IvarsInfo.size() - 1;
+        int OldSkIndex = SkipIvars.size() - 1;
+
+ const RecordType *RT = FQT->getAs<RecordType>();
+ BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = IvarsInfo.size() - 1,
+ FirstSkIndex = SkipIvars.size() - 1 ;ElIx < ElCount; ElIx++) {
+ uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits;
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx,
+ IvarsInfo[i].ivar_size));
+ for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i)
+ SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx,
+ SkipIvars[i].ivar_size));
+ }
+ continue;
+ }
+ }
+    // At this point we are done with record/union types and arrays thereof.
+    // For other arrays we are now down to the element type.
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT);
+
+ unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType());
+ if ((ForStrongLayout && GCAttr == Qualifiers::Strong)
+ || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) {
+ if (IsUnion) {
+ uint64_t UnionIvarSize = FieldSize / WordSizeInBits;
+ if (UnionIvarSize > MaxUnionIvarSize) {
+ MaxUnionIvarSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / WordSizeInBits));
+ }
+ } else if ((ForStrongLayout &&
+ (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak))
+ || (!ForStrongLayout && GCAttr != Qualifiers::Weak)) {
+ if (IsUnion) {
+        // FIXME: Why the asymmetry? We divide by word size in bits on the
+        // other side.
+ uint64_t UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxSkippedUnionIvarSize) {
+ MaxSkippedUnionIvarSize = UnionIvarSize;
+ MaxSkippedField = Field;
+ MaxSkippedFieldOffset = FieldOffset;
+ }
+ } else {
+        // FIXME: Why the asymmetry? Here we divide by byte size in bits.
+ SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / ByteSizeInBits));
+ }
+ }
+ }
+
+ if (LastFieldBitfield) {
+ // Last field was a bitfield. Must update skip info.
+ Expr *BitWidth = LastFieldBitfield->getBitWidth();
+ uint64_t BitFieldSize =
+ BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ GC_IVAR skivar;
+ skivar.ivar_bytepos = BytePos + LastBitfieldOffset;
+ skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+ + ((BitFieldSize % ByteSizeInBits) != 0);
+ SkipIvars.push_back(skivar);
+ }
+
+ if (MaxField)
+ IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset,
+ MaxUnionIvarSize));
+ if (MaxSkippedField)
+ SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset,
+ MaxSkippedUnionIvarSize));
+}
+
+/// BuildIvarLayout - Builds ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map records which words in the ivar list must be skipped and
+/// which must be scanned by the GC (see below). The string is built of bytes.
+/// Each byte is divided into two nibbles (4 bits each). The left nibble is the
+/// count of words to skip and the right nibble is the count of words to scan,
+/// so each nibble represents up to 15 words to skip or scan. Skipping the
+/// rest is represented by a 0x00 byte, which also terminates the string.
+/// 1. When ForStrongLayout is true, the following ivars are scanned:
+/// - id, Class
+/// - object *
+/// - __strong anything
+///
+/// 2. When ForStrongLayout is false, the following ivars are scanned:
+/// - __weak anything
+///
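+/// For example (an illustrative layout): a class whose instance data is 2
+/// strong words, then 5 unscanned words, then 1 strong word gets the __strong
+/// map 0x02 (skip 0, scan 2), 0x51 (skip 5, scan 1), 0x00.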
+llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
+ const ObjCImplementationDecl *OMD,
+ bool ForStrongLayout) {
+ bool hasUnion = false;
+
+ unsigned int WordsToScan, WordsToSkip;
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return llvm::Constant::getNullValue(PtrTy);
+
+ llvm::SmallVector<FieldDecl*, 32> RecFields;
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ CGM.getContext().CollectObjCIvars(OI, RecFields);
+
+  // Add this implementation's synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CGM.getContext().CollectNonClassIvars(OI, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+
+ if (RecFields.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ SkipIvars.clear();
+ IvarsInfo.clear();
+
+ BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
+ if (IvarsInfo.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+  // Sort on byte position in case we encountered a union nested in
+ // the ivar list.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ // Build the string of skip/scan nibbles
+ llvm::SmallVector<SKIP_SCAN, 32> SkipScanIvars;
+ unsigned int WordSize =
+ CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+  WordsToSkip = IvarsInfo[0].ivar_bytepos / WordSize;
+  WordsToScan = IvarsInfo[0].ivar_size;
+ for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
+ unsigned int TailPrevGCObjC =
+ IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
+ if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
+ // consecutive 'scanned' object pointers.
+ WordsToScan += IvarsInfo[i].ivar_size;
+ } else {
+      // Skip over GC-able object pointers which overlap each other.
+ if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos)
+ continue;
+ // Must skip over 1 or more words. We save current skip/scan values
+ // and start a new pair.
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+
+ // Skip the hole.
+ SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[i].ivar_size;
+ }
+ }
+ if (WordsToScan > 0) {
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+ }
+
+ if (!SkipIvars.empty()) {
+ unsigned int LastIndex = SkipIvars.size()-1;
+ int LastByteSkipped =
+ SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
+ LastIndex = IvarsInfo.size()-1;
+ int LastByteScanned =
+ IvarsInfo[LastIndex].ivar_bytepos +
+ IvarsInfo[LastIndex].ivar_size * WordSize;
+ // Compute number of bytes to skip at the tail end of the last ivar scanned.
+ if (LastByteSkipped > LastByteScanned) {
+ unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
+ SKIP_SCAN SkScan;
+ SkScan.skip = TotalWords - (LastByteScanned/WordSize);
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ }
+ }
+  // Mini optimization of nibbles such that a 0xM0 byte followed by 0x0N is
+  // produced as 0xMN.
+ int SkipScan = SkipScanIvars.size()-1;
+ for (int i = 0; i <= SkipScan; i++) {
+ if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0
+ && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) {
+ // 0xM0 followed by 0x0N detected.
+ SkipScanIvars[i].scan = SkipScanIvars[i+1].scan;
+ for (int j = i+1; j < SkipScan; j++)
+ SkipScanIvars[j] = SkipScanIvars[j+1];
+ --SkipScan;
+ }
+ }
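+  // e.g., the adjacent pairs {skip 3, scan 0} and {skip 0, scan 2} collapse
+  // into the single entry {skip 3, scan 2}, later emitted as the byte 0x32.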
+
+ // Generate the string.
+ std::string BitMap;
+ for (int i = 0; i <= SkipScan; i++) {
+ unsigned char byte;
+ unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
+ unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
+ unsigned int skip_big = SkipScanIvars[i].skip / 0xf;
+ unsigned int scan_big = SkipScanIvars[i].scan / 0xf;
+
+ // first skip big.
+ for (unsigned int ix = 0; ix < skip_big; ix++)
+ BitMap += (unsigned char)(0xf0);
+
+ // next (skip small, scan)
+ if (skip_small) {
+ byte = skip_small << 4;
+ if (scan_big > 0) {
+ byte |= 0xf;
+ --scan_big;
+ } else if (scan_small) {
+ byte |= scan_small;
+ scan_small = 0;
+ }
+ BitMap += byte;
+ }
+ // next scan big
+ for (unsigned int ix = 0; ix < scan_big; ix++)
+ BitMap += (unsigned char)(0x0f);
+ // last scan small
+ if (scan_small) {
+ byte = scan_small;
+ BitMap += byte;
+ }
+ }
+ // null terminate string.
+ unsigned char zero = 0;
+ BitMap += zero;
+
+ if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ printf("\n%s ivar layout for class '%s': ",
+ ForStrongLayout ? "strong" : "weak",
+ OMD->getClassInterface()->getNameAsCString());
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0; i < BitMap.size(); i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+ llvm::GlobalVariable * Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantArray::get(VMContext, BitMap.c_str()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
+ llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
+
+ // FIXME: Avoid std::string copying.
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
+ llvm::ConstantArray::get(VMContext, Sel.getAsString()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
+ return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID));
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(const std::string &Name) {
+ return GetMethodVarName(&CGM.getContext().Idents.get(Name));
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantArray::get(VMContext, TypeStr),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(const_cast<ObjCMethodDecl*>(D),
+ TypeStr);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantArray::get(VMContext, TypeStr),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = PropertyNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_",
+ llvm::ConstantArray::get(VMContext,
+ Ident->getNameStart()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+// FIXME: This Decl should be more precise.
+llvm::Constant *
+CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
+}
+
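+// Builds the name used for the method's IR function; e.g. (illustrative)
+// "\01-[NSString(MyCategory) length]" for a category instance method.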
+void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
+ const ObjCContainerDecl *CD,
+ llvm::SmallVectorImpl<char> &Name) {
+ llvm::raw_svector_ostream OS(Name);
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << '\01' << (D->isInstanceMethod() ? '-' : '+')
+ << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
+    OS << '(' << CID->getName() << ')';
+ OS << ' ' << D->getSelector().getAsString() << ']';
+}
+
+void CGObjCMac::FinishModule() {
+ EmitModuleInfo();
+
+ // Emit the dummy bodies for any protocols which were referenced but
+ // never defined.
+ for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
+ I = Protocols.begin(), e = Protocols.end(); I != e; ++I) {
+ if (I->second->hasInitializer())
+ continue;
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ Values[1] = GetClassName(I->first);
+ Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[3] = Values[4] =
+ llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+ I->second->setLinkage(llvm::GlobalValue::InternalLinkage);
+ I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values));
+ CGM.AddUsedGlobal(I->second);
+ }
+
+ // Add assembler directives to add lazy undefined symbol references
+ // for classes which are referenced but not defined. This is
+ // important for correct linker interaction.
+ //
+ // FIXME: It would be nice if we had an LLVM construct for this.
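+  //
+  // e.g., for a defined class Foo and a referenced-but-undefined class Bar
+  // this appends (illustrative):
+  //   .objc_class_name_Foo=0
+  //   .globl .objc_class_name_Foo
+  //   .lazy_reference .objc_class_name_Bar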
+ if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
+ llvm::SmallString<256> Asm;
+ Asm += CGM.getModule().getModuleInlineAsm();
+ if (!Asm.empty() && Asm.back() != '\n')
+ Asm += '\n';
+
+ llvm::raw_svector_ostream OS(Asm);
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
+ e = DefinedSymbols.end(); I != e; ++I)
+ OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
+ e = LazySymbols.end(); I != e; ++I)
+ OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+
+ CGM.getModule().setModuleInlineAsm(OS.str());
+ }
+}
+
+CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
+ : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCEmptyCacheVar = ObjCEmptyVtableVar = NULL;
+ ObjCABI = 2;
+}
+
+/* *** */
+
+ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
+ : VMContext(cgm.getLLVMContext()), CGM(cgm) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ ShortTy = Types.ConvertType(Ctx.ShortTy);
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ LongTy = Types.ConvertType(Ctx.LongTy);
+ LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
+ PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+
+ // FIXME: It would be nice to unify this with the opaque type, so that the IR
+ // comes out a bit cleaner.
+ const llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+
+ // I'm not sure I like this. The implicit coordination is a bit
+ // gross. We should solve this in a reasonable fashion because this
+ // is a pretty common task (match some runtime data structure with
+ // an LLVM data structure).
+
+ // FIXME: This is leaked.
+ // FIXME: Merge with rewriter code?
+
+ // struct _objc_super {
+ // id self;
+ // Class cls;
+ // }
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(),
+ &Ctx.Idents.get("_objc_super"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCIdType(), 0, 0, false));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCClassType(), 0, 0, false));
+ RD->completeDefinition();
+
+ SuperCTy = Ctx.getTagDeclType(RD);
+ SuperPtrCTy = Ctx.getPointerType(SuperCTy);
+
+ SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
+ SuperPtrTy = llvm::PointerType::getUnqual(SuperTy);
+
+ // struct _prop_t {
+ // char *name;
+ // char *attributes;
+ // }
+ PropertyTy = llvm::StructType::get(VMContext, Int8PtrTy, Int8PtrTy, NULL);
+ CGM.getModule().addTypeName("struct._prop_t",
+ PropertyTy);
+
+ // struct _prop_list_t {
+ // uint32_t entsize; // sizeof(struct _prop_t)
+ // uint32_t count_of_properties;
+ // struct _prop_t prop_list[count_of_properties];
+ // }
+ PropertyListTy = llvm::StructType::get(VMContext, IntTy,
+ IntTy,
+ llvm::ArrayType::get(PropertyTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._prop_list_t",
+ PropertyListTy);
+ // struct _prop_list_t *
+ PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
+
+ // struct _objc_method {
+ // SEL _cmd;
+ // char *method_type;
+ // char *_imp;
+ // }
+ MethodTy = llvm::StructType::get(VMContext, SelectorPtrTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method", MethodTy);
+
+ // struct _objc_cache *
+ CacheTy = llvm::OpaqueType::get(VMContext);
+ CGM.getModule().addTypeName("struct._objc_cache", CacheTy);
+ CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+}
+
+ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _objc_method_description {
+ // SEL name;
+ // char *types;
+ // }
+ MethodDescriptionTy =
+ llvm::StructType::get(VMContext, SelectorPtrTy,
+ Int8PtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method_description",
+ MethodDescriptionTy);
+
+ // struct _objc_method_description_list {
+ // int count;
+ // struct _objc_method_description[1];
+ // }
+ MethodDescriptionListTy =
+ llvm::StructType::get(VMContext, IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method_description_list",
+ MethodDescriptionListTy);
+
+ // struct _objc_method_description_list *
+ MethodDescriptionListPtrTy =
+ llvm::PointerType::getUnqual(MethodDescriptionListTy);
+
+ // Protocol description structures
+
+ // struct _objc_protocol_extension {
+ // uint32_t size; // sizeof(struct _objc_protocol_extension)
+ // struct _objc_method_description_list *optional_instance_methods;
+ // struct _objc_method_description_list *optional_class_methods;
+ // struct _objc_property_list *instance_properties;
+ // }
+ ProtocolExtensionTy =
+ llvm::StructType::get(VMContext, IntTy,
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_protocol_extension",
+ ProtocolExtensionTy);
+
+ // struct _objc_protocol_extension *
+ ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
+
+ // Handle recursive construction of Protocol and ProtocolList types
+
+ llvm::PATypeHolder ProtocolTyHolder = llvm::OpaqueType::get(VMContext);
+ llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
+
+ const llvm::Type *T =
+ llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ LongTy,
+ llvm::ArrayType::get(ProtocolTyHolder, 0),
+ NULL);
+ cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(T);
+
+ // struct _objc_protocol {
+ // struct _objc_protocol_extension *isa;
+ // char *protocol_name;
+ // struct _objc_protocol **_objc_protocol_list;
+ // struct _objc_method_description_list *instance_methods;
+ // struct _objc_method_description_list *class_methods;
+ // }
+ T = llvm::StructType::get(VMContext, ProtocolExtensionPtrTy,
+ Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ NULL);
+ cast<llvm::OpaqueType>(ProtocolTyHolder.get())->refineAbstractTypeTo(T);
+
+ ProtocolListTy = cast<llvm::StructType>(ProtocolListTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_protocol_list",
+ ProtocolListTy);
+ // struct _objc_protocol_list *
+ ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
+
+ ProtocolTy = cast<llvm::StructType>(ProtocolTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_protocol", ProtocolTy);
+ ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
+
+ // Class description structures
+
+ // struct _objc_ivar {
+ // char *ivar_name;
+ // char *ivar_type;
+ // int ivar_offset;
+ // }
+ IvarTy = llvm::StructType::get(VMContext, Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_ivar", IvarTy);
+
+ // struct _objc_ivar_list *
+ IvarListTy = llvm::OpaqueType::get(VMContext);
+ CGM.getModule().addTypeName("struct._objc_ivar_list", IvarListTy);
+ IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
+
+ // struct _objc_method_list *
+ MethodListTy = llvm::OpaqueType::get(VMContext);
+ CGM.getModule().addTypeName("struct._objc_method_list", MethodListTy);
+ MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
+
+ // struct _objc_class_extension *
+ ClassExtensionTy =
+ llvm::StructType::get(VMContext, IntTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_class_extension", ClassExtensionTy);
+ ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
+
+ llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
+
+ // struct _objc_class {
+ // Class isa;
+ // Class super_class;
+ // char *name;
+ // long version;
+ // long info;
+ // long instance_size;
+ // struct _objc_ivar_list *ivars;
+ // struct _objc_method_list *methods;
+ // struct _objc_cache *cache;
+ // struct _objc_protocol_list *protocols;
+ // char *ivar_layout;
+ // struct _objc_class_ext *ext;
+ // };
+ T = llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ NULL);
+ cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(T);
+
+ ClassTy = cast<llvm::StructType>(ClassTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_class", ClassTy);
+ ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
+
+  // struct _objc_category {
+  //   char *category_name;
+  //   char *class_name;
+  //   struct _objc_method_list *instance_methods;
+  //   struct _objc_method_list *class_methods;
+  //   struct _objc_protocol_list *protocols;
+  //   uint32_t size; // sizeof(struct _objc_category)
+  //   struct _objc_property_list *instance_properties; // category's @property
+  // }
+ CategoryTy = llvm::StructType::get(VMContext, Int8PtrTy,
+ Int8PtrTy,
+ MethodListPtrTy,
+ MethodListPtrTy,
+ ProtocolListPtrTy,
+ IntTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_category", CategoryTy);
+
+ // Global metadata structures
+
+ // struct _objc_symtab {
+ // long sel_ref_cnt;
+ // SEL *refs;
+ // short cls_def_cnt;
+ // short cat_def_cnt;
+ // char *defs[cls_def_cnt + cat_def_cnt];
+ // }
+ SymtabTy = llvm::StructType::get(VMContext, LongTy,
+ SelectorPtrTy,
+ ShortTy,
+ ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_symtab", SymtabTy);
+ SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
+
+ // struct _objc_module {
+ // long version;
+ // long size; // sizeof(struct _objc_module)
+ // char *name;
+ // struct _objc_symtab* symtab;
+ // }
+ ModuleTy =
+ llvm::StructType::get(VMContext, LongTy,
+ LongTy,
+ Int8PtrTy,
+ SymtabPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_module", ModuleTy);
+
+ // FIXME: This is the size of the setjmp buffer and should be target
+ // specific. 18 is what's used on 32-bit X86.
+ uint64_t SetJmpBufferSize = 18;
+
+ // Exceptions
+ const llvm::Type *StackPtrTy = llvm::ArrayType::get(
+ llvm::Type::getInt8PtrTy(VMContext), 4);
+
+  ExceptionDataTy =
+    llvm::StructType::get(VMContext,
+                          llvm::ArrayType::get(
+                            llvm::Type::getInt32Ty(VMContext),
+                            SetJmpBufferSize),
+                          StackPtrTy, NULL);
+ CGM.getModule().addTypeName("struct._objc_exception_data",
+ ExceptionDataTy);
+}
+
+ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _method_list_t {
+ // uint32_t entsize; // sizeof(struct _objc_method)
+ // uint32_t method_count;
+ // struct _objc_method method_list[method_count];
+ // }
+ MethodListnfABITy = llvm::StructType::get(VMContext, IntTy,
+ IntTy,
+ llvm::ArrayType::get(MethodTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct.__method_list_t",
+ MethodListnfABITy);
+ // struct method_list_t *
+ MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
+
+ // struct _protocol_t {
+ // id isa; // NULL
+ // const char * const protocol_name;
+ // const struct _protocol_list_t * protocol_list; // super protocols
+ // const struct method_list_t * const instance_methods;
+ // const struct method_list_t * const class_methods;
+ // const struct method_list_t *optionalInstanceMethods;
+ // const struct method_list_t *optionalClassMethods;
+ // const struct _prop_list_t * properties;
+ // const uint32_t size; // sizeof(struct _protocol_t)
+ // const uint32_t flags; // = 0
+ // }
+
+ // Holder for struct _protocol_list_t *
+ llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
+
+ ProtocolnfABITy = llvm::StructType::get(VMContext, ObjectPtrTy,
+ Int8PtrTy,
+ llvm::PointerType::getUnqual(
+ ProtocolListTyHolder),
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ PropertyListPtrTy,
+ IntTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._protocol_t",
+ ProtocolnfABITy);
+
+ // struct _protocol_t*
+ ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
+
+ // struct _protocol_list_t {
+ // long protocol_count; // Note, this is 32/64 bit
+ // struct _protocol_t *[protocol_count];
+ // }
+ ProtocolListnfABITy = llvm::StructType::get(VMContext, LongTy,
+ llvm::ArrayType::get(
+ ProtocolnfABIPtrTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_protocol_list",
+ ProtocolListnfABITy);
+ cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(
+ ProtocolListnfABITy);
+
+ // struct _objc_protocol_list*
+ ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
+
+ // struct _ivar_t {
+ // unsigned long int *offset; // pointer to ivar offset location
+ // char *name;
+ // char *type;
+ // uint32_t alignment;
+ // uint32_t size;
+ // }
+ IvarnfABITy = llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._ivar_t", IvarnfABITy);
+
+ // struct _ivar_list_t {
+  //   uint32_t entsize; // sizeof(struct _ivar_t)
+  //   uint32_t count;
+  //   struct _ivar_t list[count];
+ // }
+ IvarListnfABITy = llvm::StructType::get(VMContext, IntTy,
+ IntTy,
+ llvm::ArrayType::get(
+ IvarnfABITy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._ivar_list_t", IvarListnfABITy);
+
+ IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
+
+ // struct _class_ro_t {
+ // uint32_t const flags;
+ // uint32_t const instanceStart;
+ // uint32_t const instanceSize;
+ // uint32_t const reserved; // only when building for 64bit targets
+ // const uint8_t * const ivarLayout;
+ // const char *const name;
+ // const struct _method_list_t * const baseMethods;
+ // const struct _objc_protocol_list *const baseProtocols;
+ // const struct _ivar_list_t *const ivars;
+ // const uint8_t * const weakIvarLayout;
+ // const struct _prop_list_t * const properties;
+ // }
+
+ // FIXME. Add 'reserved' field in 64bit abi mode!
+ ClassRonfABITy = llvm::StructType::get(VMContext, IntTy,
+ IntTy,
+ IntTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._class_ro_t",
+ ClassRonfABITy);
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ ImpnfABITy = llvm::PointerType::getUnqual(
+ llvm::FunctionType::get(ObjectPtrTy, Params, false));
+
+ // struct _class_t {
+ // struct _class_t *isa;
+ // struct _class_t * const superclass;
+ // void *cache;
+ // IMP *vtable;
+ // struct class_ro_t *ro;
+ // }
+
+ llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
+ ClassnfABITy =
+ llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(ClassRonfABITy),
+ NULL);
+ CGM.getModule().addTypeName("struct._class_t", ClassnfABITy);
+
+ cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(
+ ClassnfABITy);
+
+ // LLVM for struct _class_t *
+ ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
+
+ // struct _category_t {
+ // const char * const name;
+ // struct _class_t *const cls;
+ // const struct _method_list_t * const instance_methods;
+ // const struct _method_list_t * const class_methods;
+ // const struct _protocol_list_t * const protocols;
+ // const struct _prop_list_t * const properties;
+ // }
+ CategorynfABITy = llvm::StructType::get(VMContext, Int8PtrTy,
+ ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._category_t", CategorynfABITy);
+
+ // New types for nonfragile abi messaging.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+
+ // First the clang type for struct _message_ref_t
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(),
+ &Ctx.Idents.get("_message_ref_t"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.VoidPtrTy, 0, 0, false));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCSelType(), 0, 0, false));
+ RD->completeDefinition();
+
+ MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
+ MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy);
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ SuperMessageRefTy = llvm::StructType::get(VMContext, ImpnfABITy,
+ SelectorPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._super_message_ref_t", SuperMessageRefTy);
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
+
+ // struct objc_typeinfo {
+ // const void** vtable; // objc_ehtype_vtable + 2
+ // const char* name; // c++ typeinfo string
+ // Class cls;
+ // };
+ EHTypeTy = llvm::StructType::get(VMContext,
+ llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy,
+ ClassnfABIPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_typeinfo", EHTypeTy);
+ EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
+}
+
+llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
+ FinishNonFragileABIModule();
+
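+  // The nonfragile ABI needs no module initializer function; the runtime
+  // discovers classes and categories from the __DATA,__objc_classlist and
+  // related sections emitted above.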
+ return NULL;
+}
+
+void CGObjCNonFragileABIMac::AddModuleClassList(
+    const std::vector<llvm::GlobalValue*> &Container,
+    const char *SymbolName,
+    const char *SectionName) {
+ unsigned NumClasses = Container.size();
+
+ if (!NumClasses)
+ return;
+
+ std::vector<llvm::Constant*> Symbols(NumClasses);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
+ ObjCTypes.Int8PtrTy);
+ llvm::Constant* Init =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ NumClasses),
+ Symbols);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ SymbolName);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection(SectionName);
+ CGM.AddUsedGlobal(GV);
+}
+
+void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
+ // nonfragile abi has no module definition.
+
+ // Build list of all implemented class addresses in array
+ // L_OBJC_LABEL_CLASS_$.
+ AddModuleClassList(DefinedClasses,
+ "\01L_OBJC_LABEL_CLASS_$",
+ "__DATA, __objc_classlist, regular, no_dead_strip");
+
+ for (unsigned i = 0; i < DefinedClasses.size(); i++) {
+ llvm::GlobalValue *IMPLGV = DefinedClasses[i];
+ if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+ continue;
+ IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+
+ for (unsigned i = 0; i < DefinedMetaClasses.size(); i++) {
+ llvm::GlobalValue *IMPLGV = DefinedMetaClasses[i];
+ if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+ continue;
+ IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+
+ AddModuleClassList(DefinedNonLazyClasses,
+ "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+ "__DATA, __objc_nlclslist, regular, no_dead_strip");
+
+ // Build list of all implemented category addresses in array
+ // L_OBJC_LABEL_CATEGORY_$.
+ AddModuleClassList(DefinedCategories,
+ "\01L_OBJC_LABEL_CATEGORY_$",
+ "__DATA, __objc_catlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyCategories,
+ "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+ "__DATA, __objc_nlcatlist, regular, no_dead_strip");
+
+ EmitImageInfo();
+}
+
+/// LegacyDispatchedSelector - Returns true if SEL is not in the list of
+/// NonLegacyDispatchMethods; false otherwise. In other words, except for
+/// the 19 selectors in the list, we generate a legacy (32-bit style)
+/// message dispatch call for all the rest.
+///
+bool CGObjCNonFragileABIMac::LegacyDispatchedSelector(Selector Sel) {
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ default:
+ assert(0 && "Invalid dispatch method!");
+ case CodeGenOptions::Legacy:
+ return true;
+ case CodeGenOptions::NonLegacy:
+ return false;
+ case CodeGenOptions::Mixed:
+ break;
+ }
+
+  // For Mixed dispatch, see whether this selector is in the white-list of
+  // things which must use the new dispatch convention. We lazily build a
+  // dense set for this.
+ if (NonLegacyDispatchMethods.empty()) {
+ NonLegacyDispatchMethods.insert(GetNullarySelector("alloc"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("class"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("self"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("isFlipped"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("length"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("count"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("retain"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("release"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("autorelease"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("hash"));
+
+ NonLegacyDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("objectForKey"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isEqual"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("addObject"));
+ // "countByEnumeratingWithState:objects:count"
+ IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ NonLegacyDispatchMethods.insert(
+ CGM.getContext().Selectors.getSelector(3, KeyIdents));
+ }
+
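+  // e.g., under Mixed dispatch 'retain' (in the set) gets the new vtable
+  // dispatch, while an arbitrary selector such as 'frobnicate' (illustrative)
+  // falls back to the legacy objc_msgSend path.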
+ return (NonLegacyDispatchMethods.count(Sel) == 0);
+}
+
+// Metadata flags
+enum MetaDataFlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20
+};
+/// BuildClassRoTInitializer - generate meta-data for:
+/// struct _class_ro_t {
+/// uint32_t const flags;
+/// uint32_t const instanceStart;
+/// uint32_t const instanceSize;
+/// uint32_t const reserved; // only when building for 64bit targets
+/// const uint8_t * const ivarLayout;
+/// const char *const name;
+/// const struct _method_list_t * const baseMethods;
+/// const struct _protocol_list_t *const baseProtocols;
+/// const struct _ivar_list_t *const ivars;
+/// const uint8_t * const weakIvarLayout;
+/// const struct _prop_list_t * const properties;
+/// }
+///
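+/// For a class Foo this emits \01l_OBJC_CLASS_RO_$_Foo, or
+/// \01l_OBJC_METACLASS_RO_$_Foo when CLS_META is set.
+///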
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
+ unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+ Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
+ Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
+ // FIXME. For 64bit targets add 0 here.
+ Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, true);
+ Values[ 4] = GetClassName(ID->getIdentifier());
+ // const struct _method_list_t * const baseMethods;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName("\01l_OBJC_$_");
+ if (flags & CLS_META) {
+ MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ } else {
+ MethodListName += "INSTANCE_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ }
+ }
+ }
+ Values[ 5] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const", Methods);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
+ Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ + OID->getName(),
+ OID->protocol_begin(),
+ OID->protocol_end());
+
+ if (flags & CLS_META)
+ Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ else
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, false);
+ if (flags & CLS_META)
+ Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ else
+ Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
+ Values);
+ llvm::GlobalVariable *CLASS_RO_GV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ (flags & CLS_META) ?
+ std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
+ std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
+ CLASS_RO_GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
+ return CLASS_RO_GV;
+}
+
+/// BuildClassMetaData - This routine defines the top-level meta-data
+/// for the given ClassName for:
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t * const superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct class_ro_t *ro;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
+ std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility) {
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = IsAGV;
+ Values[1] = SuperClassGV;
+ if (!Values[1])
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+ Values[2] = ObjCEmptyCacheVar; // &ObjCEmptyCacheVar
+ Values[3] = ObjCEmptyVtableVar; // &ObjCEmptyVtableVar
+ Values[4] = ClassRoGV; // &CLASS_RO_GV
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
+ Values);
+ llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
+ GV->setInitializer(Init);
+ GV->setSection("__DATA, __objc_data");
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ if (HiddenVisibility)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return GV;
+}
+
+bool
+CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ return OD->getClassMethod(GetNullarySelector("load")) != 0;
+}
+
+void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize) {
+ const ASTRecordLayout &RL =
+ CGM.getContext().getASTObjCImplementationLayout(OID);
+
+ // InstanceSize is really instance end.
+ InstanceSize = llvm::RoundUpToAlignment(RL.getDataSize(), 8) / 8;
+
+ // If there are no fields, the start is the same as the end.
+ if (!RL.getFieldCount())
+ InstanceStart = InstanceSize;
+ else
+ InstanceStart = RL.getFieldOffset(0) / 8;
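+  // e.g., a first field at bit offset 64 with a data size of 128 bits yields
+  // InstanceStart == 8 and InstanceSize == 16.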
+}
+
+void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ if (!ObjCEmptyCacheVar) {
+ ObjCEmptyCacheVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.CacheTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_cache");
+
+ ObjCEmptyVtableVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.ImpnfABITy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_vtable");
+ }
+ assert(ID->getClassInterface() &&
+ "CGObjCNonFragileABIMac::GenerateClass - class is 0");
+ // FIXME: Is this correct (that meta class size is never computed)?
+ uint32_t InstanceStart =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ uint32_t InstanceSize = InstanceStart;
+ uint32_t flags = CLS_META;
+ std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
+ std::string ObjCClassName(getClassSymbolPrefix());
+
+ llvm::GlobalVariable *SuperClassGV, *IsAGV;
+
+ bool classIsHidden =
+ CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (ID->getNumIvarInitializers())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
+ if (!ID->getClassInterface()->getSuperClass()) {
+ // class is root
+ flags |= CLS_ROOT;
+ SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
+ IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
+ } else {
+ // Has a root. Current class is not a root.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
+ if (Root->hasAttr<WeakImportAttr>())
+ IsAGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ // work on super class metadata symbol.
+ std::string SuperClassName =
+ ObjCMetaClassName +
+ ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(SuperClassName);
+ if (ID->getClassInterface()->getSuperClass()->hasAttr<WeakImportAttr>())
+ SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+ llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,ID);
+ std::string TClassName = ObjCMetaClassName + ClassName;
+ llvm::GlobalVariable *MetaTClass =
+ BuildClassMetaData(TClassName, IsAGV, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedMetaClasses.push_back(MetaTClass);
+
+ // Metadata for the class
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (ID->getNumIvarInitializers())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
+
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
+ flags |= CLS_EXCEPTION;
+
+ if (!ID->getClassInterface()->getSuperClass()) {
+ flags |= CLS_ROOT;
+ SuperClassGV = 0;
+ } else {
+ // Has a root. Current class is not a root.
+ std::string RootClassName =
+ ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(ObjCClassName + RootClassName);
+ if (ID->getClassInterface()->getSuperClass()->hasAttr<WeakImportAttr>())
+ SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+ GetClassSizeInfo(ID, InstanceStart, InstanceSize);
+ CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,
+ ID);
+
+ TClassName = ObjCClassName + ClassName;
+ llvm::GlobalVariable *ClassMD =
+ BuildClassMetaData(TClassName, MetaTClass, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedClasses.push_back(ClassMD);
+
+ // Determine if this class is also "non-lazy".
+ if (ImplementationIsNonLazy(ID))
+ DefinedNonLazyClasses.push_back(ClassMD);
+
+ // Force the definition of the EHType if necessary.
+ if (flags & CLS_EXCEPTION)
+ GetInterfaceEHType(ID->getClassInterface(), true);
+}
+
+/// GenerateProtocolRef - This routine is called to generate code for
+/// a protocol reference expression; as in:
+/// @code
+/// @protocol(Proto1);
+/// @endcode
+/// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1
+/// which will hold the address of the protocol meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+
+  // This routine is called for @protocol only. So, we must build the
+  // definition of the protocol's meta-data (not a reference to it!).
+ //
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
+ ObjCTypes.ExternalProtocolPtrTy);
+
+ std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+ ProtocolName += PD->getNameAsCString();
+
+ llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
+ if (PTGV)
+ return Builder.CreateLoad(PTGV, "tmp");
+ PTGV = new llvm::GlobalVariable(
+ CGM.getModule(),
+ Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ProtocolName);
+ PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(PTGV);
+ return Builder.CreateLoad(PTGV, "tmp");
+}
+
+/// GenerateCategory - Build metadata for a category implementation.
+/// struct _category_t {
+/// const char * const name;
+/// struct _class_t *const cls;
+/// const struct _method_list_t * const instance_methods;
+/// const struct _method_list_t * const class_methods;
+/// const struct _protocol_list_t * const protocols;
+/// const struct _prop_list_t * const properties;
+/// }
+///
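+/// e.g., @implementation Foo (Bar) produces the metadata symbol
+/// \01l_OBJC_$_CATEGORY_Foo_$_Bar in the __DATA,__objc_const section.
+///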
+void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const char *Prefix = "\01l_OBJC_$_CATEGORY_";
+ std::string ExtCatName(Prefix + Interface->getNameAsString()+
+ "_$_" + OCD->getNameAsString());
+ std::string ExtClassName(getClassSymbolPrefix() +
+ Interface->getNameAsString());
+
+ std::vector<llvm::Constant*> Values(6);
+ Values[0] = GetClassName(OCD->getIdentifier());
+  // The category's class entry symbol.
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
+ if (Interface->hasAttr<WeakImportAttr>())
+ ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ Values[1] = ClassGV;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName(Prefix);
+ MethodListName += "INSTANCE_METHODS_" + Interface->getNameAsString() +
+ "_$_" + OCD->getNameAsString();
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[2] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+
+ MethodListName = Prefix;
+ MethodListName += "CLASS_METHODS_" + Interface->getNameAsString() + "_$_" +
+ OCD->getNameAsString();
+ Methods.clear();
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[3] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ if (Category) {
+ llvm::SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << "_$_"
+ << OCD->getName();
+ Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ + Interface->getName() + "_$_"
+ + Category->getName(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+ Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
+ Values);
+ llvm::GlobalVariable *GCATV
+ = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
+ false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ ExtCatName);
+ GCATV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.CategorynfABITy));
+ GCATV->setSection("__DATA, __objc_const");
+ CGM.AddUsedGlobal(GCATV);
+ DefinedCategories.push_back(GCATV);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(OCD))
+ DefinedNonLazyCategories.push_back(GCATV);
+}
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
+ const ObjCMethodDecl *MD) {
+ // FIXME: Use DenseMap::lookup
+ llvm::Function *Fn = MethodDefinitions[MD];
+ if (!Fn)
+ return 0;
+
+ std::vector<llvm::Constant*> Method(3);
+ Method[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Method[1] = GetMethodVarType(MD);
+ Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+/// EmitMethodList - Build meta-data for method declarations
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+///
+llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ // sizeof(struct _objc_method)
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ // method_count
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name);
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection(Section);
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodListnfABIPtrTy);
+}
+
+/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+/// the given ivar.
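+/// e.g., ivar 'bar' of class Foo uses the global OBJC_IVAR_$_Foo.bar.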
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+ std::string Name = "OBJC_IVAR_$_" + Container->getNameAsString() +
+ '.' + Ivar->getNameAsString();
+ llvm::GlobalVariable *IvarOffsetGV =
+ CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV)
+ IvarOffsetGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.LongTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ return IvarOffsetGV;
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int Offset) {
+ llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
+ IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
+ Offset));
+ IvarOffsetGV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.LongTy));
+
+  // FIXME: This matches gcc, but shouldn't the visibility be set on the use
+  // as well (i.e., in ObjCIvarOffsetVariable)?
+ if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+ Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+ CGM.getDeclVisibilityMode(ID) == LangOptions::Hidden)
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ else
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ IvarOffsetGV->setSection("__DATA, __objc_const");
+ return IvarOffsetGV;
+}
+
+/// EmitIvarList - Emit the ivar list for the given
+/// implementation. The return value has type
+/// IvarListnfABIPtrTy.
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// char *name;
+/// char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+/// struct _ivar_list_t {
+///   uint32_t entsize; // sizeof(struct _ivar_t)
+///   uint32_t count;
+///   struct _ivar_t list[count];
+/// }
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
+ const ObjCImplementationDecl *ID) {
+
+ std::vector<llvm::Constant*> Ivars, Ivar(5);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
+
+ // FIXME. Consolidate this with similar code in GenerateClass.
+
+ // Collect declared and synthesized ivars in a small vector.
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD));
+ Ivar[1] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[2] = GetMethodVarType(IVD);
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(IVD->getType());
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
+ unsigned Align = CGM.getContext().getPreferredTypeAlign(
+ IVD->getType().getTypePtr()) >> 3;
+ Align = llvm::Log2_32(Align);
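+    // e.g., a preferred alignment of 8 bytes is recorded as 3 (log2).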
+ Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+    // NOTE. The size of a bitfield does not match gcc's, because of the
+    // way each treats bitfields specially. But I am told that 'size' for
+    // bitfield ivars is ignored by the runtime, so it does not matter. If
+    // it ever matters, there is enough info here to get the bitfield right!
+ Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ }
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ std::vector<llvm::Constant*> Values(3);
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
+ Ivars.size());
+ Values[2] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Prefix + OID->getName());
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection("__DATA, __objc_const");
+
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/// GetOrEmitProtocol - Generate the protocol meta-data:
+/// @code
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char * const protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t * const instance_methods;
+/// const struct method_list_t * const class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// }
+/// @endcode
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ } else {
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ } else {
+ ClassMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(10);
+ // isa is NULL
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+
+ Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ InstanceMethods);
+ Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ ClassMethods);
+ Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ OptInstanceMethods);
+ Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ OptClassMethods);
+ Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getName(),
+ 0, PD, ObjCTypes);
+ uint32_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Init,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(Entry);
+
+  // Use this protocol meta-data to build the protocol list table in
+  // section __DATA, __objc_protolist.
+ llvm::GlobalVariable *PTGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Entry,
+ "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getName());
+ PTGV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(PTGV);
+ return Entry;
+}
+
+/// EmitProtocolList - Generate protocol list meta-data:
+/// @code
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+/// struct _protocol_t[protocol_count];
+/// }
+/// @endcode
+///
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<llvm::Constant*> ProtocolRefs;
+
+ // Just return null for empty protocol lists
+ if (begin == end)
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+
+ // FIXME: We shouldn't need to do this lookup here, should we?
+ llvm::SmallString<256> TmpName;
+ Name.toVector(TmpName);
+ llvm::GlobalVariable *GV =
+ CGM.getModule().getGlobalVariable(TmpName.str(), true);
+ if (GV)
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(
+ ObjCTypes.ProtocolnfABIPtrTy));
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] =
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[1] =
+ llvm::ConstantArray::get(
+ llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name);
+ GV->setSection("__DATA, __objc_const");
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+}
+
+/// GetMethodDescriptionConstant - This routine builds the following
+/// meta-data:
+/// @code
+/// struct _objc_method {
+///   SEL _cmd;
+///   char *method_type;
+///   char *_imp;
+/// }
+/// @endcode
+llvm::Constant *
+CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ std::vector<llvm::Constant*> Desc(3);
+ Desc[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ // Protocol methods have no implementation. So, this entry is always NULL.
+ Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
+}
+
+/// EmitObjCValueForIvar - Code gen for a nonfragile ivar reference.
+/// This amounts to generating code for:
+/// @code
+/// (type *)((char *)base + _OBJC_IVAR_$_.ivar);
+/// @endcode
+///
+LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
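+/// EmitIvarOffset - Load the nonfragile ivar offset from its global offset
+/// variable rather than folding in a compile-time constant; e.g. (a sketch,
+/// with an illustrative name):
+/// @code
+/// foo->ivar  // => (char *)foo + load of _OBJC_IVAR_$_Foo.ivar
+/// @endcode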
+llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
+ CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),"ivar");
+}
+
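+/// EmitMessageSend - Emit a message send through the nonfragile "fixup"
+/// dispatch scheme. The message-ref global built below corresponds roughly
+/// to this runtime structure (a sketch; field names are assumed):
+/// @code
+/// struct _message_ref_t {
+///   IMP messenger;  // e.g. objc_msgSend_fixup, rewritten by the runtime
+///   SEL name;
+/// };
+/// @endcode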
+CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
+ CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs) {
+  // FIXME: Even though IsSuper is passed in, this function does not handle
+  // calls to 'super' receivers.
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::Value *Arg0 = Receiver;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+
+ // Find the message function name.
+  // FIXME: This is too much work to get the ABI-specific result type needed
+  // to find the message name.
+ const CGFunctionInfo &FnInfo
+ = Types.getFunctionInfo(ResultType, CallArgList(),
+ FunctionType::ExtInfo());
+ llvm::Constant *Fn = 0;
+ std::string Name("\01l_");
+ if (CGM.ReturnTypeUsesSret(FnInfo)) {
+#if 0
+    // Unlike what is documented, GCC never generates this API!
+ if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
+ Fn = ObjCTypes.getMessageSendIdStretFixupFn();
+      // FIXME: Is there a better way of getting these names? They are
+      // available in the RuntimeFunctions vector pair.
+ Name += "objc_msgSendId_stret_fixup";
+ } else
+#endif
+ if (IsSuper) {
+ Fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+ Name += "objc_msgSendSuper2_stret_fixup";
+ } else {
+ Fn = ObjCTypes.getMessageSendStretFixupFn();
+ Name += "objc_msgSend_stret_fixup";
+ }
+ } else if (!IsSuper && ResultType->isFloatingType()) {
+ if (ResultType->isSpecificBuiltinType(BuiltinType::LongDouble)) {
+ Fn = ObjCTypes.getMessageSendFpretFixupFn();
+ Name += "objc_msgSend_fpret_fixup";
+ } else {
+ Fn = ObjCTypes.getMessageSendFixupFn();
+ Name += "objc_msgSend_fixup";
+ }
+ } else {
+#if 0
+// Unlike what is documented, GCC never generates this API!
+ if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
+ Fn = ObjCTypes.getMessageSendIdFixupFn();
+ Name += "objc_msgSendId_fixup";
+ } else
+#endif
+ if (IsSuper) {
+ Fn = ObjCTypes.getMessageSendSuper2FixupFn();
+ Name += "objc_msgSendSuper2_fixup";
+ } else {
+ Fn = ObjCTypes.getMessageSendFixupFn();
+ Name += "objc_msgSend_fixup";
+ }
+ }
+ assert(Fn && "CGObjCNonFragileABIMac::EmitMessageSend");
+ Name += '_';
+ std::string SelName(Sel.getAsString());
+  // Replace all ':' in the selector name with '_'. Ouch!
+ for (unsigned i = 0; i < SelName.size(); i++)
+ if (SelName[i] == ':')
+ SelName[i] = '_';
+ Name += SelName;
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (!GV) {
+ // Build message ref table entry.
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = Fn;
+ Values[1] = GetMethodVarName(Sel);
+ llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ Name);
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setAlignment(16);
+ GV->setSection("__DATA, __objc_msgrefs, coalesced");
+ }
+ llvm::Value *Arg1 = CGF.Builder.CreateBitCast(GV, ObjCTypes.MessageRefPtrTy);
+
+ CallArgList ActualArgs;
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg1),
+ ObjCTypes.MessageRefCPtrTy));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs,
+ FunctionType::ExtInfo());
+ llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0);
+ Callee = CGF.Builder.CreateLoad(Callee);
+ const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true);
+ Callee = CGF.Builder.CreateBitCast(Callee,
+ llvm::PointerType::getUnqual(FTy));
+ return CGF.EmitCall(FnInfo1, Callee, Return, ActualArgs);
+}
+
+/// Generate code for a message send expression in the nonfragile abi.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return LegacyDispatchedSelector(Sel)
+ ? EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes)
+ : EmitMessageSend(CGF, Return, ResultType, Sel,
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs);
+}
+
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+
+ if (!GV) {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ 0, Name);
+ }
+
+ return GV;
+}
+
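+/// EmitClassRef - Load the class pointer through an
+/// L_OBJC_CLASSLIST_REFERENCES_$_ slot so the runtime can rebind it;
+/// e.g. (a sketch):
+/// @code
+/// [Foo alloc]  // => load of a classref slot pointing at OBJC_CLASS_$_Foo
+/// @endcode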
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+/// EmitMetaClassRef - Return a Value * for the address of the _class_t
+/// meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
+ if (Entry)
+ return Builder.CreateLoad(Entry, "tmp");
+
+ std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ MetaClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ if (ID->hasAttr<WeakImportAttr>()) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+
+ return EmitClassRef(Builder, ID);
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // ...
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+      // Message sent to 'super' in a class method defined in
+      // a category implementation.
+ Target = EmitClassRef(CGF.Builder, Class);
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ } else
+ Target = EmitMetaClassRef(CGF.Builder, Class);
+ } else
+ Target = EmitSuperClassRef(CGF.Builder, Class);
+
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ const llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+
+ return (LegacyDispatchedSelector(Sel))
+ ? EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes)
+ : EmitMessageSend(CGF, Return, ResultType, Sel,
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs);
+}
+
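+/// EmitSelector - Load a selector through its L_OBJC_SELECTOR_REFERENCES_
+/// slot, which the runtime uniques at load time; e.g. (a sketch):
+/// @code
+/// @selector(foo:)  // => load of a selref slot initialized to "foo:"
+/// @endcode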
+llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
+ Selector Sel) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Casted, "\01L_OBJC_SELECTOR_REFERENCES_");
+ Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, "tmp");
+}
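+
+// Under GC, a __strong ivar store such as `obj->ivar = val' is lowered to
+// the write barrier documented below; roughly (a sketch):
+// @code
+//   objc_assign_ivar(val, (id *)((char *)obj + offset), offset);
+// @endcode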
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
+///
+void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src,
+ llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, ivarOffset);
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+                          src, dst, "strongassign");
+ return;
+}
+
+void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ QualType Ty) {
+ // Get size info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
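+  // getTypeInfo reports the size in bits; convert it to bytes for memmove.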
+ unsigned long size = TypeInfo.first/8;
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size);
+ CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+ DestPtr, SrcPtr, N);
+ return;
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ const llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ return;
+}
+
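+/// EmitTryOrSynchronizedStmt - Emit @try/@catch/@finally or @synchronized.
+/// The control flow built below is roughly (a sketch of the block layout):
+/// @code
+///   try:           body, with invokes unwinding to try.handler
+///   try.handler:   eh.exception + eh.selector, dispatch to a matching @catch
+///   match:         objc_begin_catch ... objc_end_catch
+///   finally:       @finally body, or objc_sync_exit for @synchronized
+///   finally.throw: rethrow via _Unwind_Resume_or_Rethrow
+/// @endcode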
+void
+CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can safely avoid a temp here because jumps into
+ // @synchronized are illegal & this will dominate uses.
+ llvm::Value *SyncArg = 0;
+ if (!isTry) {
+ SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ }
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ CGF.setInvokeDest(TryHandler);
+
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the exception handler.
+
+ CGF.EmitBlock(TryHandler);
+
+ llvm::Value *llvm_eh_exception =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+ llvm::Value *llvm_eh_typeid_for =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+ SelectorArgs.push_back(Exc);
+ SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr());
+
+ // Construct the lists of (type, catch body) to handle.
+ llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
+ bool HasCatchAll = false;
+ if (isTry) {
+ const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
+ for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+ Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
+
+ // catch(...) always matches.
+ if (!CatchDecl) {
+ // Use i8* null here to signal this is a catch all, not a cleanup.
+ llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ SelectorArgs.push_back(Null);
+ HasCatchAll = true;
+ break;
+ }
+
+ if (CatchDecl->getType()->isObjCIdType() ||
+ CatchDecl->getType()->isObjCQualifiedIdType()) {
+ llvm::Value *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "OBJC_EHTYPE_id");
+ SelectorArgs.push_back(IDEHType);
+ } else {
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *PT =
+ CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
+ SelectorArgs.push_back(EHType);
+ }
+ }
+ }
+
+ // We use a cleanup unless there was already a catch all.
+ if (!HasCatchAll) {
+ // Even though this is a cleanup, treat it as a catch all to avoid the C++
+ // personality behavior of terminating the process if only cleanups are
+ // found in the exception handling stack.
+ SelectorArgs.push_back(llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy));
+ Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ }
+
+ llvm::Value *Selector =
+ CGF.Builder.CreateCall(llvm_eh_selector,
+ SelectorArgs.begin(), SelectorArgs.end(),
+ "selector");
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+ const VarDecl *CatchParam = Handlers[i].first;
+ const Stmt *CatchBody = Handlers[i].second;
+
+ llvm::BasicBlock *Next = 0;
+
+ // The last handler always matches.
+ if (i + 1 != e) {
+ assert(CatchParam && "Only last handler can be a catch all.");
+
+ llvm::BasicBlock *Match = CGF.createBasicBlock("match");
+ Next = CGF.createBasicBlock("catch.next");
+ llvm::Value *Id =
+ CGF.Builder.CreateCall(llvm_eh_typeid_for,
+ CGF.Builder.CreateBitCast(SelectorArgs[i+2],
+ ObjCTypes.Int8PtrTy));
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(Selector, Id),
+ Match, Next);
+
+ CGF.EmitBlock(Match);
+ }
+
+ if (CatchBody) {
+ llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end");
+
+ // Cleanups must call objc_end_catch.
+ CGF.PushCleanupBlock(MatchEnd);
+
+ llvm::Value *ExcObject =
+ CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc);
+
+ // Bind the catch parameter if it exists.
+ if (CatchParam) {
+ ExcObject =
+ CGF.Builder.CreateBitCast(ExcObject,
+ CGF.ConvertType(CatchParam->getType()));
+ // CatchParam is a ParmVarDecl because of the grammar
+ // construction used to handle this, but for codegen purposes
+ // we treat this as a local decl.
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ // Exceptions inside the catch block must be rethrown. We set a special
+ // purpose invoke destination for this which just collects the thrown
+ // exception and overwrites the object in RethrowPtr, branches through the
+ // match.end to make sure we call objc_end_catch, before branching to the
+ // rethrow handler.
+ llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
+ CGF.setInvokeDest(MatchHandler);
+ CGF.ObjCEHValueStack.push_back(ExcObject);
+ CGF.EmitStmt(CatchBody);
+ CGF.ObjCEHValueStack.pop_back();
+ CGF.setInvokeDest(0);
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+      // Don't emit the extra match handler if there were no unprotected
+      // calls in the catch block.
+ if (MatchHandler->use_empty()) {
+ delete MatchHandler;
+ } else {
+ CGF.EmitBlock(MatchHandler);
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ CGF.Builder.CreateCall3(llvm_eh_selector,
+ Exc, ObjCTypes.getEHPersonalityPtr(),
+ llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 0),
+ "unused_eh_selector");
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.EmitBlock(MatchEnd);
+
+ // Unfortunately, we also have to generate another EH frame here
+ // in case this throws.
+ llvm::BasicBlock *MatchEndHandler =
+ CGF.createBasicBlock("match.end.handler");
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(),
+ Cont, MatchEndHandler);
+
+ CGF.EmitBlock(Cont);
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ CGF.EmitBlock(MatchEndHandler);
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ CGF.Builder.CreateCall3(llvm_eh_selector,
+ Exc, ObjCTypes.getEHPersonalityPtr(),
+ llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 0),
+ "unused_eh_selector");
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ if (Next)
+ CGF.EmitBlock(Next);
+ } else {
+      assert(!Next && "catch-all should be the last handler.");
+
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+
+ // Pop the cleanup entry, the @finally is outside this cleanup
+ // scope.
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+ CGF.setInvokeDest(PrevLandingPad);
+
+ CGF.EmitBlock(FinallyBlock);
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit 'objc_sync_exit(expr)' as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ }
+
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ // Branch around the rethrow code.
+ CGF.EmitBranch(FinallyEnd);
+
+ // Generate the rethrow code, taking care to use an invoke if we are in a
+ // nested exception scope.
+ CGF.EmitBlock(FinallyRethrow);
+ if (PrevLandingPad) {
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(ObjCTypes.getUnwindResumeOrRethrowFn(),
+ Cont, PrevLandingPad,
+ CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.EmitBlock(Cont);
+ } else {
+ CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr));
+ }
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(FinallyEnd);
+}
+
+/// EmitThrowStmt - Generate code for a throw statement.
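+/// Both `@throw expr;' and a parameterless rethrow inside a @catch block
+/// funnel into the runtime's throw entry point; e.g. (a sketch):
+/// @code
+///   @throw obj;  // => objc_exception_throw(obj)
+/// @endcode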
+void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *Exception;
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ Exception = CGF.EmitScalarExpr(ThrowExpr);
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ Exception = CGF.ObjCEHValueStack.back();
+ }
+
+ llvm::Value *ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
+ if (InvokeDest) {
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(ObjCTypes.getExceptionThrowFn(),
+ Cont, InvokeDest,
+ &ExceptionAsObject, &ExceptionAsObject + 1);
+ CGF.EmitBlock(Cont);
+ } else
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
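+/// GetInterfaceEHType - Build or reference the OBJC_EHTYPE_$_ global for the
+/// given interface. Its layout mirrors the runtime's objc_typeinfo (a
+/// sketch; field names are assumed):
+/// @code
+/// struct _objc_ehtype_t {
+///   void **vtable;    // objc_ehtype_vtable + 2
+///   const char *name; // class name
+///   Class cls;        // OBJC_CLASS_$_<name>
+/// };
+/// @endcode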
+llvm::Value *
+CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition) {
+ llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
+
+ // If we don't need a definition, return the entry if found or check
+ // if we use an external reference.
+ if (!ForDefinition) {
+ if (Entry)
+ return Entry;
+
+ // If this type (or a super class) has the __objc_exception__
+ // attribute, emit an external reference.
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID))
+ return Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ ("OBJC_EHTYPE_$_" +
+ ID->getIdentifier()->getName()));
+ }
+
+ // Otherwise we need to either make a new entry or fill in the
+ // initializer.
+ assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition");
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string VTableName = "objc_ehtype_vtable";
+ llvm::GlobalVariable *VTableGV =
+ CGM.getModule().getGlobalVariable(VTableName);
+ if (!VTableGV)
+ VTableGV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.Int8PtrTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, VTableName);
+
+ llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
+ Values[1] = GetClassName(ID->getIdentifier());
+ Values[2] = GetClassGlobal(ClassName);
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
+
+ if (Entry) {
+ Entry->setInitializer(Init);
+ } else {
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ("OBJC_EHTYPE_$_" +
+ ID->getIdentifier()->getName()));
+ }
+
+ if (CGM.getLangOptions().getVisibilityMode() == LangOptions::Hidden)
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.EHTypeTy));
+
+ if (ForDefinition) {
+ Entry->setSection("__DATA,__objc_const");
+ Entry->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/* *** */
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCMac(CGM);
+}
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacNonFragileABIObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCNonFragileABIMac(CGM);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
new file mode 100644
index 0000000..8de7f10
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -0,0 +1,221 @@
+//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for Objective-C code generation. Concrete
+// subclasses of this implement code generation for specific Objective-C
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OBJCRUNTIME_H
+#define CLANG_CODEGEN_OBJCRUNTIME_H
+
+#include "clang/Basic/IdentifierTable.h" // Selector
+#include "clang/AST/DeclObjC.h"
+#include <string>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Constant;
+ class Function;
+ class Module;
+ class StructLayout;
+ class StructType;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenFunction;
+}
+
+ class FieldDecl;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCContainerDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCInterfaceDecl;
+ class ObjCMessageExpr;
+ class ObjCMethodDecl;
+ class ObjCProtocolDecl;
+ class Selector;
+ class ObjCIvarDecl;
+ class ObjCStringLiteral;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+// FIXME: Several methods should be pure virtual but aren't, to avoid
+// breaking partially-implemented subclasses.
+
+/// Implements runtime-specific code generation functions.
+class CGObjCRuntime {
+protected:
+ // Utility functions for unified ivar access. These need to
+ // eventually be folded into other places (the structure layout
+ // code).
+
+ /// Compute an offset to the given ivar, suitable for passing to
+ /// EmitValueForIvarAtOffset. Note that the correct handling of
+ /// bit-fields is carefully coordinated by these two, use caution!
+ ///
+  /// The latter overload is suitable for computing the offset of a
+  /// synthesized ivar.
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar);
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar);
+
+ LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset);
+
+public:
+ virtual ~CGObjCRuntime();
+
+ /// Generate the function required to register all Objective-C components in
+ /// this compilation unit with the runtime library.
+ virtual llvm::Function *ModuleInitFunction() = 0;
+
+ /// Get a selector for the specified name and type values. The
+ /// return value should have the LLVM type for pointer-to
+ /// ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ Selector Sel) = 0;
+
+ /// Get a typed selector.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method) = 0;
+
+ /// Generate a constant string object.
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
+
+ /// Generate a category. A category contains a list of methods (and
+ /// accompanying metadata) and a list of protocols.
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
+
+  /// Generate a class structure for this class.
+ virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+
+ /// Generate an Objective-C message send operation.
+ ///
+ /// \param Method - The method being called, this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class = 0,
+ const ObjCMethodDecl *Method = 0) = 0;
+
+ /// Generate an Objective-C message send operation to the super
+ /// class initiated in a method for Class and with the given Self
+ /// object.
+ ///
+ /// \param Method - The method being called, this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Self,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method = 0) = 0;
+
+ /// Emit the code to return the named protocol as an object, as in a
+ /// @protocol expression.
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate the named protocol. Protocols contain method metadata but no
+ /// implementations.
+ virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate a function preamble for a method with the specified
+ /// types.
+
+  // FIXME: Currently this just generates the Function definition, but really
+  // this should also generate the loads of the parameters, as the runtime
+  // should have full control over how parameters are passed.
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) = 0;
+
+ /// Return the runtime function for getting properties.
+ virtual llvm::Constant *GetPropertyGetFunction() = 0;
+
+ /// Return the runtime function for setting properties.
+ virtual llvm::Constant *GetPropertySetFunction() = 0;
+
+  /// API for atomic copying of qualified aggregates in setter/getter.
+ virtual llvm::Constant *GetCopyStructFunction() = 0;
+
+ /// GetClass - Return a reference to the class for the given
+ /// interface decl.
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) = 0;
+
+ /// EnumerationMutationFunction - Return the function that's called by the
+ /// compiler when a mutation is detected during foreach iteration.
+ virtual llvm::Constant *EnumerationMutationFunction() = 0;
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) = 0;
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) = 0;
+ virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) = 0;
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset) = 0;
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) = 0;
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) = 0;
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ QualType Ty) = 0;
+};
+
+/// Creates an instance of an Objective-C runtime class.
+//TODO: This should include some way of selecting which runtime to target.
+CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacNonFragileABIObjCRuntime(CodeGenModule &CGM);
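+
+// A caller (CodeGenModule, in this sketch) is expected to choose among these
+// based on the language options; the option field names are assumed:
+// @code
+//   if (!Opts.NeXTRuntime)
+//     Runtime = CreateGNUObjCRuntime(CGM);
+//   else if (Opts.ObjCNonFragileABI)
+//     Runtime = CreateMacNonFragileABIObjCRuntime(CGM);
+//   else
+//     Runtime = CreateMacObjCRuntime(CGM);
+// @endcode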
+}
+}
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
new file mode 100644
index 0000000..aec1c45
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -0,0 +1,827 @@
+//===--- CGRTTI.cpp - Emit LLVM Code for C++ RTTI descriptors -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of RTTI descriptors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Type.h"
+#include "clang/AST/RecordLayout.h"
+#include "CodeGenModule.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class RTTIBuilder {
+ CodeGenModule &CGM; // Per-module state.
+ llvm::LLVMContext &VMContext;
+
+ const llvm::Type *Int8PtrTy;
+
+ /// Fields - The fields of the RTTI descriptor currently being built.
+ llvm::SmallVector<llvm::Constant *, 16> Fields;
+
+ /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
+ /// descriptor of the given type.
+ llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
+
+ /// BuildVTablePointer - Build the vtable pointer for the given type.
+ void BuildVTablePointer(const Type *Ty);
+
+ /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+ /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+ void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+ /// classes with bases that do not satisfy the abi::__si_class_type_info
+  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+ void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
+ /// for pointer types.
+ void BuildPointerTypeInfo(const PointerType *Ty);
+
+ /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+ /// struct, used for member pointer types.
+ void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
+
+public:
+ RTTIBuilder(CodeGenModule &cgm)
+ : CGM(cgm), VMContext(cgm.getModule().getContext()),
+ Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
+
+ llvm::Constant *BuildName(QualType Ty, bool Hidden,
+ llvm::GlobalVariable::LinkageTypes Linkage) {
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXRTTIName(Ty, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *OGV = CGM.getModule().getNamedGlobal(Name);
+ if (OGV && !OGV->isDeclaration())
+ return llvm::ConstantExpr::getBitCast(OGV, Int8PtrTy);
+
+ llvm::Constant *C = llvm::ConstantArray::get(VMContext, Name.substr(4));
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), C->getType(), true, Linkage,
+ C, Name);
+ if (OGV) {
+ GV->takeName(OGV);
+ llvm::Constant *NewPtr = llvm::ConstantExpr::getBitCast(GV,
+ OGV->getType());
+ OGV->replaceAllUsesWith(NewPtr);
+ OGV->eraseFromParent();
+ }
+ if (Hidden)
+ GV->setVisibility(llvm::GlobalVariable::HiddenVisibility);
+ return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+ }
+
+ // FIXME: unify with DecideExtern
+ bool DecideHidden(QualType Ty) {
+    // For this type, check whether all of its components are hidden.
+ if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
+ return (DecideHidden(MPT->getPointeeType())
+ && DecideHidden(QualType(MPT->getClass(), 0)));
+ if (const PointerType *PT = Ty->getAs<PointerType>())
+ return DecideHidden(PT->getPointeeType());
+ if (const FunctionType *FT = Ty->getAs<FunctionType>()) {
+ if (DecideHidden(FT->getResultType()) == false)
+ return false;
+ if (const FunctionProtoType *FPT = Ty->getAs<FunctionProtoType>()) {
+ for (unsigned i = 0; i <FPT->getNumArgs(); ++i)
+ if (DecideHidden(FPT->getArgType(i)) == false)
+ return false;
+ for (unsigned i = 0; i <FPT->getNumExceptions(); ++i)
+ if (DecideHidden(FPT->getExceptionType(i)) == false)
+ return false;
+ return true;
+ }
+ }
+ if (const RecordType *RT = Ty->getAs<RecordType>())
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ return CGM.getDeclVisibilityMode(RD) == LangOptions::Hidden;
+ return false;
+ }
+
+ // Pointer type info flags.
+ enum {
+ /// PTI_Const - Type has const qualifier.
+ PTI_Const = 0x1,
+
+ /// PTI_Volatile - Type has volatile qualifier.
+ PTI_Volatile = 0x2,
+
+ /// PTI_Restrict - Type has restrict qualifier.
+ PTI_Restrict = 0x4,
+
+ /// PTI_Incomplete - Type is incomplete.
+ PTI_Incomplete = 0x8,
+
+ /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+ /// (in pointer to member).
+ PTI_ContainingClassIncomplete = 0x10
+ };
+
+ // VMI type info flags.
+ enum {
+ /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+ VMI_NonDiamondRepeat = 0x1,
+
+ /// VMI_DiamondShaped - Class is diamond shaped.
+ VMI_DiamondShaped = 0x2
+ };
+
+ // Base class type info flags.
+ enum {
+ /// BCTI_Virtual - Base class is virtual.
+ BCTI_Virtual = 0x1,
+
+ /// BCTI_Public - Base class is public.
+ BCTI_Public = 0x2
+ };
+
+ /// BuildTypeInfo - Build the RTTI type info struct for the given type.
+ ///
+ /// \param Force - true to force the creation of this RTTI value
+ /// \param ForEH - true if this is for exception handling
+ llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
+};
+}
+
+llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
+ // Mangle the RTTI name.
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXRTTI(Ty, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ // Look for an existing global.
+ llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
+
+ if (!GV) {
+ // Create a new global variable.
+ GV = new llvm::GlobalVariable(CGM.getModule(), Int8PtrTy, /*Constant=*/true,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
+
+ return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+}
+
+/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
+ // Itanium C++ ABI 2.9.2:
+ // Basic type information (e.g. for "int", "bool", etc.) will be kept in
+ // the run-time support library. Specifically, the run-time support
+ // library should contain type_info objects for the types X, X* and
+ // X const*, for every X in: void, bool, wchar_t, char, unsigned char,
+ // signed char, short, unsigned short, int, unsigned int, long,
+ // unsigned long, long long, unsigned long long, float, double, long double,
+ // char16_t, char32_t, and the IEEE 754r decimal and half-precision
+ // floating point types.
+ switch (Ty->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::Bool:
+ case BuiltinType::WChar:
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return true;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::UndeducedAuto:
+ assert(false && "Should not see this type here!");
+
+ case BuiltinType::NullPtr:
+ assert(false && "FIXME: nullptr_t is not handled!");
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ assert(false && "FIXME: Objective-C types are unsupported!");
+ }
+
+  // Silence GCC warnings.
+ return false;
+}
+
+static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+ const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
+ if (!BuiltinTy)
+ return false;
+
+ // Check the qualifiers.
+ Qualifiers Quals = PointeeTy.getQualifiers();
+ Quals.removeConst();
+
+ if (!Quals.empty())
+ return false;
+
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists somewhere else, so that we should not emit the type
+/// information in this translation unit.
+static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
+ QualType Ty) {
+ // Type info for builtin types is defined in the standard library.
+ if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+
+ // Type info for some pointer types to builtin types is defined in the
+ // standard library.
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return TypeInfoIsInStandardLibrary(PointerTy);
+
+ // If RTTI is disabled, don't consider key functions.
+ if (!Context.getLangOptions().RTTI) return false;
+
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!RD->hasDefinition())
+ return false;
+
+ if (!RD->isDynamicClass())
+ return false;
+
+ // Get the key function.
+ const CXXMethodDecl *KeyFunction = RD->getASTContext().getKeyFunction(RD);
+ if (KeyFunction && !KeyFunction->getBody()) {
+ // The class has a key function, but it is not defined in this translation
+ // unit, so we should use the external descriptor for it.
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool IsIncompleteClassType(const RecordType *RecordTy) {
+ return !RecordTy->getDecl()->isDefinition();
+}
+
+/// ContainsIncompleteClassType - Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+///   * The given type is an incomplete class type.
+///   * The given type is a pointer type whose pointee type contains an
+///     incomplete class type.
+///   * The given type is a member pointer type whose class is an incomplete
+///     class type.
+///   * The given type is a member pointer type whose pointee type contains
+///     an incomplete class type.
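+///
+/// For example, given only `class A;' with no definition, both `A *' and
+/// `int A::*' contain an incomplete class type.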
+static bool ContainsIncompleteClassType(QualType Ty) {
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ if (IsIncompleteClassType(RecordTy))
+ return true;
+ }
+
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return ContainsIncompleteClassType(PointerTy->getPointeeType());
+
+ if (const MemberPointerType *MemberPointerTy =
+ dyn_cast<MemberPointerType>(Ty)) {
+ // Check if the class type is incomplete.
+ const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
+ if (IsIncompleteClassType(ClassType))
+ return true;
+
+ return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
+ }
+
+ return false;
+}
+
+/// getTypeInfoLinkage - Return the linkage that the type info and type info
+/// name constants should have for the given type.
+static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
+ // Itanium C++ ABI 2.9.5p7:
+ // In addition, it and all of the intermediate abi::__pointer_type_info
+ // structs in the chain down to the abi::__class_type_info for the
+ // incomplete class type must be prevented from resolving to the
+ // corresponding type_info structs for the complete class type, possibly
+ // by making them local static objects. Finally, a dummy class RTTI is
+ // generated for the incomplete type that will not resolve to the final
+ // complete class RTTI (because the latter need not exist), possibly by
+ // making it a local static object.
+ if (ContainsIncompleteClassType(Ty))
+ return llvm::GlobalValue::InternalLinkage;
+
+ switch (Ty->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return llvm::GlobalValue::InternalLinkage;
+
+ case ExternalLinkage:
+ if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (RD->isDynamicClass())
+ return CodeGenModule::getVTableLinkage(RD);
+ }
+
+ return llvm::GlobalValue::WeakODRLinkage;
+ }
+
+ return llvm::GlobalValue::WeakODRLinkage;
+}
+
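+// For example (a sketch), D below qualifies for abi::__si_class_type_info
+// while E does not, because its base is non-public:
+// @code
+// struct B { virtual ~B(); };
+// struct D : public B { };   // single, public, non-virtual base
+// struct E : private B { };  // => abi::__vmi_class_type_info
+// @endcode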
+// CanUseSingleInheritance - Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b.
+static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
+ // Check the number of bases.
+ if (RD->getNumBases() != 1)
+ return false;
+
+ // Get the base.
+ CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
+
+ // Check that the base is not virtual.
+ if (Base->isVirtual())
+ return false;
+
+ // Check that the base is public.
+ if (Base->getAccessSpecifier() != AS_public)
+ return false;
+
+ // Check that the class is dynamic iff the base is.
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseDecl->isEmpty() &&
+ BaseDecl->isDynamicClass() != RD->isDynamicClass())
+ return false;
+
+ return true;
+}
+
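+// The type_info vtables referenced below follow the usual Itanium layout
+// (a sketch): two header entries, then the virtual functions, so slot 2 is
+// the address point taken when storing the vtable pointer:
+// @code
+// _ZTVN10__cxxabiv117__class_type_infoE:
+//   [0] offset-to-top
+//   [1] RTTI pointer
+//   [2] first virtual function  <-- address point
+// @endcode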
+void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
+ const char *VTableName;
+
+ switch (Ty->getTypeClass()) {
+ default: assert(0 && "Unhandled type!");
+
+ case Type::Builtin:
+ // GCC treats vector types as fundamental types.
+ case Type::Vector:
+ case Type::ExtVector:
+ // abi::__fundamental_type_info.
+ VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // abi::__array_type_info.
+ VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // abi::__function_type_info.
+ VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+ break;
+
+ case Type::Enum:
+ // abi::__enum_type_info.
+ VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ // abi::__class_type_info.
+ VTableName = "_ZTVN10__cxxabiv117__class_type_infoE";
+ } else if (CanUseSingleInheritance(RD)) {
+ // abi::__si_class_type_info.
+ VTableName = "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ } else {
+ // abi::__vmi_class_type_info.
+ VTableName = "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+ }
+
+ break;
+ }
+
+ case Type::Pointer:
+ // abi::__pointer_type_info.
+ VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+ break;
+
+ case Type::MemberPointer:
+ // abi::__pointer_to_member_type_info.
+ VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+ break;
+ }
+
+ llvm::Constant *VTable =
+ CGM.getModule().getOrInsertGlobal(VTableName, Int8PtrTy);
+
+ const llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ // The vtable address point is 2.
+ llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, &Two, 1);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, Int8PtrTy);
+
+ Fields.push_back(VTable);
+}
+
+llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty,
+ bool Force) {
+ // We want to operate on the canonical type.
+ Ty = CGM.getContext().getCanonicalType(Ty);
+
+ // Check if we've already emitted an RTTI descriptor for this type.
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXRTTI(Ty, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
+ if (OldGV && !OldGV->isDeclaration())
+ return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
+
+ // Check if there is already an external RTTI descriptor for this type.
+ if (!Force && ShouldUseExternalRTTIDescriptor(CGM.getContext(), Ty))
+ return GetAddrOfExternalRTTIDescriptor(Ty);
+
+ llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(Ty);
+
+ // Add the vtable pointer.
+ BuildVTablePointer(cast<Type>(Ty));
+
+ // And the name.
+ Fields.push_back(BuildName(Ty, DecideHidden(Ty), Linkage));
+
+ switch (Ty->getTypeClass()) {
+ default: assert(false && "Unhandled type class!");
+
+ // GCC treats vector types as fundamental types.
+ case Type::Builtin:
+ case Type::Vector:
+ case Type::ExtVector:
+ // Itanium C++ ABI 2.9.5p4:
+ // abi::__fundamental_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__array_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__function_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Enum:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__enum_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ // We don't need to emit any fields.
+ break;
+ }
+
+ if (CanUseSingleInheritance(RD))
+ BuildSIClassTypeInfo(RD);
+ else
+ BuildVMIClassTypeInfo(RD);
+
+ break;
+ }
+
+ case Type::Pointer:
+ BuildPointerTypeInfo(cast<PointerType>(Ty));
+ break;
+
+ case Type::MemberPointer:
+ BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
+ break;
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(VMContext, &Fields[0], Fields.size(),
+ /*Packed=*/false);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ /*Constant=*/true, Linkage, Init, Name);
+
+ // If there's already an old global variable, replace it with the new one.
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+
+ return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+}
+
+/// ComputeQualifierFlags - Compute the pointer type info flags from the
+/// given qualifiers.
+static unsigned ComputeQualifierFlags(Qualifiers Quals) {
+ unsigned Flags = 0;
+
+ if (Quals.hasConst())
+ Flags |= RTTIBuilder::PTI_Const;
+ if (Quals.hasVolatile())
+ Flags |= RTTIBuilder::PTI_Volatile;
+ if (Quals.hasRestrict())
+ Flags |= RTTIBuilder::PTI_Restrict;
+
+ return Flags;
+}
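+
+// For illustration (example only, not from the ABI text): given a pointer
+// such as 'const volatile char *P', the pointee qualifiers yield
+//
+//   ComputeQualifierFlags(Quals) == PTI_Const | PTI_Volatile
+//
+// which lands in the low bits of the __flags word of P's __pbase_type_info.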
+
+/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+void RTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
+ // Itanium C++ ABI 2.9.5p6b:
+ // It adds to abi::__class_type_info a single member pointing to the
+ // type_info structure for the base type,
+ llvm::Constant *BaseTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(RD->bases_begin()->getType());
+ Fields.push_back(BaseTypeInfo);
+}
+
+/// SeenBases - Contains virtual and non-virtual bases seen when traversing
+/// a class hierarchy.
+struct SeenBases {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+};
+
+/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
+/// abi::__vmi_class_type_info.
+///
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
+ SeenBases &Bases) {
+
+ unsigned Flags = 0;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual()) {
+ if (Bases.VirtualBases.count(BaseDecl)) {
+ // If this virtual base has been seen before, then the class is diamond
+ // shaped.
+ Flags |= RTTIBuilder::VMI_DiamondShaped;
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl))
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+ // Mark the virtual base as seen.
+ Bases.VirtualBases.insert(BaseDecl);
+ }
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl)) {
+ // If this non-virtual base has been seen before, then the class has non-
+ // diamond shaped repeated inheritance.
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+ } else {
+ if (Bases.VirtualBases.count(BaseDecl))
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+ // Mark the non-virtual base as seen.
+ Bases.NonVirtualBases.insert(BaseDecl);
+ }
+ }
+
+ // Walk all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = BaseDecl->bases_begin(),
+ E = BaseDecl->bases_end(); I != E; ++I)
+ Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+ return Flags;
+}
+
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
+ unsigned Flags = 0;
+ SeenBases Bases;
+
+ // Walk all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I)
+ Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+ return Flags;
+}
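+
+// A worked example of the traversal above (illustrative, not from the ABI
+// text):
+//
+//   struct A { virtual ~A(); };
+//   struct B : virtual A { };
+//   struct C : virtual A { };
+//   struct D : B, C { };
+//
+// Walking D's bases reaches the virtual base A first through B (marking it
+// seen) and again through C, so the second visit sets VMI_DiamondShaped.
+// Repeating a non-virtual base instead would set VMI_NonDiamondRepeat.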
+
+/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+/// classes with bases that do not satisfy the abi::__si_class_type_info
+/// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
+void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
+ const llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __flags is a word with flags describing details about the class
+ // structure, which may be referenced by using the __flags_masks
+ // enumeration. These flags refer to both direct and indirect bases.
+ unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_count is a word with the number of direct proper base class
+ // descriptions that follow.
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
+
+ if (!RD->getNumBases())
+ return;
+
+ const llvm::Type *LongLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy);
+
+ // Now add the base class descriptions.
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_info[] is an array of base class descriptions -- one for every
+ // direct proper base. Each description is of the type:
+ //
+ // struct abi::__base_class_type_info {
+ // public:
+ // const __class_type_info *__base_type;
+ // long __offset_flags;
+ //
+ // enum __offset_flags_masks {
+ // __virtual_mask = 0x1,
+ // __public_mask = 0x2,
+ // __offset_shift = 8
+ // };
+ // };
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier *Base = I;
+
+ // The __base_type member points to the RTTI for the base type.
+ Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(Base->getType()));
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ int64_t OffsetFlags = 0;
+
+ // All but the lower 8 bits of __offset_flags are a signed offset.
+ // For a non-virtual base, this is the offset in the object of the base
+ // subobject. For a virtual base, this is the offset in the virtual table of
+ // the virtual base offset for the virtual base referenced (negative).
+ if (Base->isVirtual())
+ OffsetFlags = CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ else {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ OffsetFlags = Layout.getBaseClassOffset(BaseDecl) / 8;
+    }
+
+ OffsetFlags <<= 8;
+
+ // The low-order byte of __offset_flags contains flags, as given by the
+ // masks from the enumeration __offset_flags_masks.
+ if (Base->isVirtual())
+ OffsetFlags |= BCTI_Virtual;
+ if (Base->getAccessSpecifier() == AS_public)
+ OffsetFlags |= BCTI_Public;
+
+ Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
+ }
+}
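+
+// Worked example (illustrative): a public non-virtual base laid out at byte
+// offset 8 in the derived object gets
+//
+//   OffsetFlags = (8 << 8) | BCTI_Public;  // == 0x802
+//
+// whereas a public virtual base stores the (typically negative) vtable
+// offset of its virtual-base-offset slot in the high bits instead.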
+
+/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
+/// used for pointer types.
+void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
+ QualType PointeeTy = Ty->getPointeeType();
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to
+ unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers());
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(PointeeTy))
+ Flags |= PTI_Incomplete;
+
+ const llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType());
+ Fields.push_back(PointeeTypeInfo);
+}
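+
+// For example (illustrative): for 'const int *', __flags carries PTI_Const
+// and __pointee references the type_info for plain 'int'; the cv-qualifiers
+// live in the flag word, not in the pointee descriptor.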
+
+/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+/// struct, used for member pointer types.
+void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
+ QualType PointeeTy = Ty->getPointeeType();
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to.
+ unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers());
+
+ const RecordType *ClassType = cast<RecordType>(Ty->getClass());
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(PointeeTy))
+ Flags |= PTI_Incomplete;
+
+ if (IsIncompleteClassType(ClassType))
+ Flags |= PTI_ContainingClassIncomplete;
+
+ const llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType());
+ Fields.push_back(PointeeTypeInfo);
+
+ // Itanium C++ ABI 2.9.5p9:
+ // __context is a pointer to an abi::__class_type_info corresponding to the
+ // class type containing the member pointed to
+ // (e.g., the "A" in "int A::*").
+ Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(QualType(ClassType, 0)));
+}
+
+llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
+ bool ForEH) {
+ // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+ // FIXME: should we even be calling this method if RTTI is disabled
+ // and it's not for EH?
+ if (!ForEH && !getContext().getLangOptions().RTTI) {
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ return llvm::Constant::getNullValue(Int8PtrTy);
+ }
+
+ return RTTIBuilder(*this).BuildTypeInfo(Ty);
+}
+
+void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
+ QualType PointerType = Context.getPointerType(Type);
+ QualType PointerTypeConst = Context.getPointerType(Type.withConst());
+ RTTIBuilder(*this).BuildTypeInfo(Type, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerType, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
+}
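+
+// Illustrative: passed Context.IntTy, this forces out the descriptors for
+// 'int', 'int*' and 'const int*' -- with Itanium mangling, _ZTIi, _ZTIPi
+// and _ZTIPKi respectively.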
+
+void CodeGenModule::EmitFundamentalRTTIDescriptors() {
+ QualType FundamentalTypes[] = { Context.VoidTy, Context.Char32Ty,
+ Context.Char16Ty, Context.UnsignedLongLongTy,
+ Context.LongLongTy, Context.WCharTy,
+ Context.UnsignedShortTy, Context.ShortTy,
+ Context.UnsignedLongTy, Context.LongTy,
+ Context.UnsignedIntTy, Context.IntTy,
+ Context.UnsignedCharTy, Context.FloatTy,
+ Context.LongDoubleTy, Context.DoubleTy,
+ Context.CharTy, Context.BoolTy,
+ Context.SignedCharTy };
+ for (unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i)
+ EmitFundamentalRTTIDescriptor(FundamentalTypes[i]);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
new file mode 100644
index 0000000..e95591e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
@@ -0,0 +1,222 @@
+//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGRECORDLAYOUT_H
+#define CLANG_CODEGEN_CGRECORDLAYOUT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/Decl.h"
+namespace llvm {
+ class raw_ostream;
+ class Type;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// \brief Helper object for describing how to generate the code for access to a
+/// bit-field.
+///
+/// This structure is intended to describe the "policy" of how the bit-field
+/// should be accessed, which may be target, language, or ABI dependent.
+class CGBitFieldInfo {
+public:
+ /// Descriptor for a single component of a bit-field access. The entire
+ /// bit-field is constituted of a bitwise OR of all of the individual
+ /// components.
+ ///
+ /// Each component describes an accessed value, which is how the component
+ /// should be transferred to/from memory, and a target placement, which is how
+ /// that component fits into the constituted bit-field. The pseudo-IR for a
+ /// load is:
+ ///
+ /// %0 = gep %base, 0, FieldIndex
+ /// %1 = gep (i8*) %0, FieldByteOffset
+ /// %2 = (i(AccessWidth) *) %1
+ /// %3 = load %2, align AccessAlignment
+ /// %4 = shr %3, FieldBitStart
+ ///
+ /// and the composed bit-field is formed as the boolean OR of all accesses,
+ /// masked to TargetBitWidth bits and shifted to TargetBitOffset.
+ struct AccessInfo {
+ /// Offset of the field to load in the LLVM structure, if any.
+ unsigned FieldIndex;
+
+ /// Byte offset from the field address, if any. This should generally be
+ /// unused as the cleanest IR comes from having a well-constructed LLVM type
+ /// with proper GEP instructions, but sometimes its use is required, for
+ /// example if an access is intended to straddle an LLVM field boundary.
+ unsigned FieldByteOffset;
+
+ /// Bit offset in the accessed value to use. The width is implied by \see
+ /// TargetBitWidth.
+ unsigned FieldBitStart;
+
+ /// Bit width of the memory access to perform.
+ unsigned AccessWidth;
+
+ /// The alignment of the memory access, or 0 if the default alignment should
+ /// be used.
+ //
+ // FIXME: Remove use of 0 to encode default, instead have IRgen do the right
+ // thing when it generates the code, if avoiding align directives is
+ // desired.
+ unsigned AccessAlignment;
+
+ /// Offset for the target value.
+ unsigned TargetBitOffset;
+
+ /// Number of bits in the access that are destined for the bit-field.
+ unsigned TargetBitWidth;
+ };
+
+private:
+ /// The components to use to access the bit-field. We may need up to three
+ /// separate components to support up to i64 bit-field access (4 + 2 + 1 byte
+ /// accesses).
+ //
+ // FIXME: De-hardcode this, just allocate following the struct.
+ AccessInfo Components[3];
+
+ /// The total size of the bit-field, in bits.
+ unsigned Size;
+
+ /// The number of access components to use.
+ unsigned NumComponents;
+
+ /// Whether the bit-field is signed.
+ bool IsSigned : 1;
+
+public:
+ CGBitFieldInfo(unsigned Size, unsigned NumComponents, AccessInfo *_Components,
+ bool IsSigned) : Size(Size), NumComponents(NumComponents),
+ IsSigned(IsSigned) {
+ assert(NumComponents <= 3 && "invalid number of components!");
+ for (unsigned i = 0; i != NumComponents; ++i)
+ Components[i] = _Components[i];
+
+ // Check some invariants.
+ unsigned AccessedSize = 0;
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ AccessedSize += AI.TargetBitWidth;
+
+ // We shouldn't try to load 0 bits.
+ assert(AI.TargetBitWidth > 0);
+
+ // We can't load more bits than we accessed.
+ assert(AI.FieldBitStart + AI.TargetBitWidth <= AI.AccessWidth);
+
+ // We shouldn't put any bits outside the result size.
+ assert(AI.TargetBitWidth + AI.TargetBitOffset <= Size);
+ }
+
+ // Check that the total number of target bits matches the total bit-field
+ // size.
+ assert(AccessedSize == Size && "Total size does not match accessed size!");
+ }
+
+public:
+  /// \brief Check whether this bit-field access is signed (i.e., should be
+  /// sign extended on loads).
+ bool isSigned() const { return IsSigned; }
+
+ /// \brief Get the size of the bit-field, in bits.
+ unsigned getSize() const { return Size; }
+
+ /// @name Component Access
+ /// @{
+
+ unsigned getNumComponents() const { return NumComponents; }
+
+ const AccessInfo &getComponent(unsigned Index) const {
+ assert(Index < getNumComponents() && "Invalid access!");
+ return Components[Index];
+ }
+
+ /// @}
+
+ void print(llvm::raw_ostream &OS) const;
+ void dump() const;
+};
+
+/// CGRecordLayout - This class handles struct and union layout info while
+/// lowering AST types to LLVM types.
+///
+/// These layout objects are only created on demand as IR generation requires.
+class CGRecordLayout {
+ friend class CodeGenTypes;
+
+ CGRecordLayout(const CGRecordLayout&); // DO NOT IMPLEMENT
+ void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
+
+private:
+ /// The LLVMType corresponding to this record layout.
+ const llvm::Type *LLVMType;
+
+ /// Map from (non-bit-field) struct field to the corresponding llvm struct
+ /// type field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+ /// Map from (bit-field) struct field to the corresponding llvm struct type
+ /// field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ // FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
+ // map for both virtual and non virtual bases.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBaseFields;
+
+ /// Whether one of the fields in this record layout is a pointer to data
+ /// member, or a struct that contains pointer to data member.
+ bool ContainsPointerToDataMember : 1;
+
+public:
+ CGRecordLayout(const llvm::Type *T, bool ContainsPointerToDataMember)
+ : LLVMType(T), ContainsPointerToDataMember(ContainsPointerToDataMember) {}
+
+ /// \brief Return the LLVM type associated with this record.
+ const llvm::Type *getLLVMType() const {
+ return LLVMType;
+ }
+
+ /// \brief Check whether this struct contains pointers to data members.
+ bool containsPointerToDataMember() const {
+ return ContainsPointerToDataMember;
+ }
+
+ /// \brief Return llvm::StructType element number that corresponds to the
+ /// field FD.
+ unsigned getLLVMFieldNo(const FieldDecl *FD) const {
+ assert(!FD->isBitField() && "Invalid call for bit-field decl!");
+ assert(FieldInfo.count(FD) && "Invalid field for record!");
+ return FieldInfo.lookup(FD);
+ }
+
+ unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
+ assert(NonVirtualBaseFields.count(RD) && "Invalid non-virtual base!");
+ return NonVirtualBaseFields.lookup(RD);
+ }
+
+ /// \brief Return the BitFieldInfo that corresponds to the field FD.
+ const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
+ assert(FD->isBitField() && "Invalid call for non bit-field decl!");
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
+ it = BitFields.find(FD);
+ assert(it != BitFields.end() && "Unable to find bitfield info");
+ return it->second;
+ }
+
+ void print(llvm::raw_ostream &OS) const;
+ void dump() const;
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
new file mode 100644
index 0000000..9f16875
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -0,0 +1,786 @@
+//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Builder implementation for CGRecordLayout objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGRecordLayout.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "CodeGenTypes.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace clang {
+namespace CodeGen {
+
+class CGRecordLayoutBuilder {
+public:
+ /// FieldTypes - Holds the LLVM types that the struct is created from.
+ std::vector<const llvm::Type *> FieldTypes;
+
+ /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
+ typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
+ llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;
+
+ /// LLVMBitFieldInfo - Holds location and size information about a bit field.
+ typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
+ llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
+
+ typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
+ llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;
+
+ /// ContainsPointerToDataMember - Whether one of the fields in this record
+ /// layout is a pointer to data member, or a struct that contains pointer to
+ /// data member.
+ bool ContainsPointerToDataMember;
+
+ /// Packed - Whether the resulting LLVM struct will be packed or not.
+ bool Packed;
+
+private:
+ CodeGenTypes &Types;
+
+ /// Alignment - Contains the alignment of the RecordDecl.
+ //
+ // FIXME: This is not needed and should be removed.
+ unsigned Alignment;
+
+ /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
+ /// LLVM types.
+ unsigned AlignmentAsLLVMStruct;
+
+  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
+  /// field, this will have the number of bits still available in the field.
+ char BitsAvailableInLastField;
+
+ /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
+ uint64_t NextFieldOffsetInBytes;
+
+  /// LayoutUnionField - Will layout a field in a union and return the type
+  /// that the field will have.
+ const llvm::Type *LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout);
+
+ /// LayoutUnion - Will layout a union RecordDecl.
+ void LayoutUnion(const RecordDecl *D);
+
+  /// LayoutFields - try to layout all fields in the record decl.
+  /// Returns false if the operation failed because the struct is not packed.
+ bool LayoutFields(const RecordDecl *D);
+
+ /// LayoutNonVirtualBase - layout a single non-virtual base.
+ void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+ uint64_t BaseOffset);
+
+ /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
+ void LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
+ /// LayoutField - layout a single field. Returns false if the operation failed
+ /// because the current struct is not packed.
+ bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// LayoutBitField - layout a single bit field.
+ void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// AppendField - Appends a field with the given offset and type.
+ void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
+
+ /// AppendPadding - Appends enough padding bytes so that the total
+ /// struct size is a multiple of the field alignment.
+ void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
+
+ /// AppendBytes - Append a given number of bytes to the record.
+ void AppendBytes(uint64_t NumBytes);
+
+ /// AppendTailPadding - Append enough tail padding so that the type will have
+ /// the passed size.
+ void AppendTailPadding(uint64_t RecordSize);
+
+ unsigned getTypeAlignment(const llvm::Type *Ty) const;
+
+ /// CheckForPointerToDataMember - Check if the given type contains a pointer
+ /// to data member.
+ void CheckForPointerToDataMember(QualType T);
+ void CheckForPointerToDataMember(const CXXRecordDecl *RD);
+
+public:
+ CGRecordLayoutBuilder(CodeGenTypes &Types)
+ : ContainsPointerToDataMember(false), Packed(false), Types(Types),
+ Alignment(0), AlignmentAsLLVMStruct(1),
+ BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
+
+ /// Layout - Will layout a RecordDecl.
+ void Layout(const RecordDecl *D);
+};
+
+}
+}
+
+void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
+ Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
+ Packed = D->hasAttr<PackedAttr>();
+
+ if (D->isUnion()) {
+ LayoutUnion(D);
+ return;
+ }
+
+ if (LayoutFields(D))
+ return;
+
+ // We weren't able to layout the struct. Try again with a packed struct
+ Packed = true;
+ AlignmentAsLLVMStruct = 1;
+ NextFieldOffsetInBytes = 0;
+ FieldTypes.clear();
+ LLVMFields.clear();
+ LLVMBitFields.clear();
+ LLVMNonVirtualBases.clear();
+
+ LayoutFields(D);
+}
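+
+// Illustrative example of the retry path, assuming a field-level packed
+// attribute:
+//
+//   struct S { char c; int i __attribute__((packed)); };
+//
+// The first pass runs with Packed = false and LayoutField rejects 'i'
+// (placed by the AST at byte offset 1), so the state is reset and the
+// record is laid out again as a packed LLVM struct, roughly <{ i8, i32 }>.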
+
+static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
+ uint64_t ContainingTypeSizeInBits = RL.getSize();
+ unsigned ContainingTypeAlign = RL.getAlignment();
+
+ const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
+ uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
+ uint64_t TypeSizeInBits = TypeSizeInBytes * 8;
+
+ bool IsSigned = FD->getType()->isSignedIntegerType();
+
+ if (FieldSize > TypeSizeInBits) {
+ // We have a wide bit-field. The extra bits are only used for padding, so
+ // if we have a bitfield of type T, with size N:
+ //
+ // T t : N;
+ //
+ // We can just assume that it's:
+ //
+ // T t : sizeof(T);
+ //
+ FieldSize = TypeSizeInBits;
+ }
+
+ // Compute the access components. The policy we use is to start by attempting
+ // to access using the width of the bit-field type itself and to always access
+ // at aligned indices of that type. If such an access would fail because it
+ // extends past the bound of the type, then we reduce size to the next smaller
+ // power of two and retry. The current algorithm assumes pow2 sized types,
+ // although this is easy to fix.
+ //
+ // FIXME: This algorithm is wrong on big-endian systems, I think.
+ assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
+ CGBitFieldInfo::AccessInfo Components[3];
+ unsigned NumComponents = 0;
+  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
+ unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
+
+ // Round down from the field offset to find the first access position that is
+ // at an aligned offset of the initial access type.
+ uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+
+ // Adjust initial access size to fit within record.
+ while (AccessWidth > 8 &&
+ AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ AccessWidth >>= 1;
+ AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+ }
+
+ while (AccessedTargetBits < FieldSize) {
+ // Check that we can access using a type of this size, without reading off
+ // the end of the structure. This can occur with packed structures and
+ // -fno-bitfield-type-align, for example.
+ if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ // If so, reduce access size to the next smaller power-of-two and retry.
+ AccessWidth >>= 1;
+ assert(AccessWidth >= 8 && "Cannot access under byte size!");
+ continue;
+ }
+
+ // Otherwise, add an access component.
+
+ // First, compute the bits inside this access which are part of the
+ // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
+ // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
+ // in the target that we are reading.
+ assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
+ assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
+ uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
+ uint64_t AccessBitsInFieldSize =
+ std::min(AccessWidth + AccessStart,
+ FieldOffset + FieldSize) - AccessBitsInFieldStart;
+
+ assert(NumComponents < 3 && "Unexpected number of components!");
+ CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
+ AI.FieldIndex = 0;
+ // FIXME: We still follow the old access pattern of only using the field
+ // byte offset. We should switch this once we fix the struct layout to be
+ // pretty.
+ AI.FieldByteOffset = AccessStart / 8;
+ AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
+ AI.AccessWidth = AccessWidth;
+ AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
+ AI.TargetBitOffset = AccessedTargetBits;
+ AI.TargetBitWidth = AccessBitsInFieldSize;
+
+ AccessStart += AccessWidth;
+ AccessedTargetBits += AI.TargetBitWidth;
+ }
+
+ assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
+ return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
+}
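+
+// Worked example (illustrative): for
+//
+//   struct S { char a; int b : 10; };   // 'b' occupies bits [8, 18)
+//
+// in a 32-bit-wide record, the initial access width is 32 (the width of
+// 'int') and AccessStart rounds down to bit 0, so one component suffices:
+//
+//   FieldByteOffset = 0, FieldBitStart = 8, AccessWidth = 32,
+//   TargetBitOffset = 0, TargetBitWidth = 10
+//
+// i.e. load an i32, shift right by 8, and mask to 10 bits.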
+
+void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
+ uint64_t FieldOffset) {
+ uint64_t FieldSize =
+ D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+
+ if (FieldSize == 0)
+ return;
+
+ uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
+ unsigned NumBytesToAppend;
+
+ if (FieldOffset < NextFieldOffset) {
+ assert(BitsAvailableInLastField && "Bitfield size mismatch!");
+ assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");
+
+ // The bitfield begins in the previous bit-field.
+ NumBytesToAppend =
+ llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
+ } else {
+ assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");
+
+ // Append padding if necessary.
+ AppendBytes((FieldOffset - NextFieldOffset) / 8);
+
+ NumBytesToAppend =
+ llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+
+ assert(NumBytesToAppend && "No bytes to append!");
+ }
+
+ // Add the bit field info.
+ LLVMBitFields.push_back(
+ LLVMBitFieldInfo(D, ComputeBitFieldInfo(Types, D, FieldOffset, FieldSize)));
+
+ AppendBytes(NumBytesToAppend);
+
+ BitsAvailableInLastField =
+ NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
+}
+
+bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
+ uint64_t FieldOffset) {
+ // If the field is packed, then we need a packed struct.
+ if (!Packed && D->hasAttr<PackedAttr>())
+ return false;
+
+ if (D->isBitField()) {
+ // We must use packed structs for unnamed bit fields since they
+ // don't affect the struct alignment.
+ if (!Packed && !D->getDeclName())
+ return false;
+
+ LayoutBitField(D, FieldOffset);
+ return true;
+ }
+
+ // Check if we have a pointer to data member in this field.
+ CheckForPointerToDataMember(D->getType());
+
+ assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
+ uint64_t FieldOffsetInBytes = FieldOffset / 8;
+
+ const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
+ unsigned TypeAlignment = getTypeAlignment(Ty);
+
+ // If the type alignment is larger then the struct alignment, we must use
+ // a packed struct.
+ if (TypeAlignment > Alignment) {
+ assert(!Packed && "Alignment is wrong even with packed struct!");
+ return false;
+ }
+
+ if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (const MaxFieldAlignmentAttr *MFAA =
+ RD->getAttr<MaxFieldAlignmentAttr>()) {
+ if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
+ return false;
+ }
+ }
+
+ // Round up the field offset to the alignment of the field type.
+ uint64_t AlignedNextFieldOffsetInBytes =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);
+
+ if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
+ assert(!Packed && "Could not place field even with packed struct!");
+ return false;
+ }
+
+ if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ // Even with alignment, the field offset is not at the right place,
+ // insert padding.
+ uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
+
+ AppendBytes(PaddingInBytes);
+ }
+
+ // Now append the field.
+ LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
+ AppendField(FieldOffsetInBytes, Ty);
+
+ return true;
+}
+
+const llvm::Type *
+CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout) {
+ if (Field->isBitField()) {
+ uint64_t FieldSize =
+ Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+
+ // Ignore zero sized bit fields.
+ if (FieldSize == 0)
+ return 0;
+
+ const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ unsigned NumBytesToAppend =
+ llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+
+ if (NumBytesToAppend > 1)
+ FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
+
+ // Add the bit field info.
+ LLVMBitFields.push_back(
+ LLVMBitFieldInfo(Field, ComputeBitFieldInfo(Types, Field, 0, FieldSize)));
+ return FieldTy;
+ }
+
+ // This is a regular union field.
+ LLVMFields.push_back(LLVMFieldInfo(Field, 0));
+ return Types.ConvertTypeForMemRecursive(Field->getType());
+}
+
+void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
+ assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
+
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+
+ const llvm::Type *Ty = 0;
+ uint64_t Size = 0;
+ unsigned Align = 0;
+
+ bool HasOnlyZeroSizedBitFields = true;
+
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ assert(Layout.getFieldOffset(FieldNo) == 0 &&
+ "Union field offset did not start at the beginning of record!");
+ const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);
+
+ if (!FieldTy)
+ continue;
+
+ HasOnlyZeroSizedBitFields = false;
+
+ unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
+ uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);
+
+ if (FieldAlign < Align)
+ continue;
+
+ if (FieldAlign > Align || FieldSize > Size) {
+ Ty = FieldTy;
+ Align = FieldAlign;
+ Size = FieldSize;
+ }
+ }
+
+ // Now add our field.
+ if (Ty) {
+ AppendField(0, Ty);
+
+ if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
+ // We need a packed struct.
+ Packed = true;
+ Align = 1;
+ }
+ }
+ if (!Align) {
+ assert(HasOnlyZeroSizedBitFields &&
+ "0-align record did not have all zero-sized bit-fields!");
+ Align = 1;
+ }
+
+ // Append tail padding.
+ if (Layout.getSize() / 8 > Size)
+ AppendPadding(Layout.getSize() / 8, Align);
+}
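+
+// Illustrative: for
+//
+//   union U { char c; int i; double d; };
+//
+// the loop keeps the field with the strictest alignment (preferring the
+// larger size on ties), so the union is lowered as { double } and the other
+// members are accessed through casts of the same storage.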
+
+void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+ uint64_t BaseOffset) {
+ const ASTRecordLayout &Layout =
+ Types.getContext().getASTRecordLayout(BaseDecl);
+
+ uint64_t NonVirtualSize = Layout.getNonVirtualSize();
+
+ if (BaseDecl->isEmpty()) {
+ // FIXME: Lay out empty bases.
+ return;
+ }
+
+ CheckForPointerToDataMember(BaseDecl);
+
+ // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
+ AppendPadding(BaseOffset / 8, 1);
+
+ // Append the base field.
+ LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));
+
+ AppendBytes(NonVirtualSize / 8);
+}
+
+void
+CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // Check if we need to add a vtable pointer.
+ if (RD->isDynamicClass()) {
+ if (!PrimaryBase) {
+ const llvm::Type *FunctionType =
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
+ /*isVarArg=*/true);
+ const llvm::Type *VTableTy = FunctionType->getPointerTo();
+
+ assert(NextFieldOffsetInBytes == 0 &&
+ "VTable pointer must come first!");
+ AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
+ } else {
+ // FIXME: Handle a virtual primary base.
+ if (!Layout.getPrimaryBaseWasVirtual())
+ LayoutNonVirtualBase(PrimaryBase, 0);
+ }
+ }
+
+ // Layout the non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We've already laid out the primary base.
+ if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
+ continue;
+
+ LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
+ }
+}
+
+bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+ assert(!D->isUnion() && "Can't call LayoutFields on a union!");
+ assert(Alignment && "Did not set alignment!");
+
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
+ LayoutNonVirtualBases(RD, Layout);
+
+ unsigned FieldNo = 0;
+
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
+ assert(!Packed &&
+ "Could not layout fields even with a packed LLVM struct!");
+ return false;
+ }
+ }
+
+ // Append tail padding if necessary.
+ AppendTailPadding(Layout.getSize());
+
+ return true;
+}
+
+void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
+ assert(RecordSize % 8 == 0 && "Invalid record size!");
+
+ uint64_t RecordSizeInBytes = RecordSize / 8;
+ assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+
+ uint64_t AlignedNextFieldOffset =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
+
+ if (AlignedNextFieldOffset == RecordSizeInBytes) {
+ // We don't need any padding.
+ return;
+ }
+
+ unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+ AppendBytes(NumPadBytes);
+}
+
+void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
+ const llvm::Type *FieldTy) {
+ AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
+ getTypeAlignment(FieldTy));
+
+ uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);
+
+ FieldTypes.push_back(FieldTy);
+
+ NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
+ BitsAvailableInLastField = 0;
+}
+
+void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
+ unsigned FieldAlignment) {
+ assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
+ "Incorrect field layout!");
+
+ // Round up the field offset to the alignment of the field type.
+ uint64_t AlignedNextFieldOffsetInBytes =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+
+ if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ // Even with alignment, the field offset is not at the right place,
+ // insert padding.
+ uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
+
+ AppendBytes(PaddingInBytes);
+ }
+}
+
+void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
+ if (NumBytes == 0)
+ return;
+
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+ // Append the padding field
+ AppendField(NextFieldOffsetInBytes, Ty);
+}
+
+unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
+ if (Packed)
+ return 1;
+
+ return Types.getTargetData().getABITypeAlignment(Ty);
+}
+
+void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
+ // This record already contains a member pointer.
+ if (ContainsPointerToDataMember)
+ return;
+
+ // Can only have member pointers if we're compiling C++.
+ if (!Types.getContext().getLangOptions().CPlusPlus)
+ return;
+
+ T = Types.getContext().getBaseElementType(T);
+
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
+ if (!MPT->getPointeeType()->isFunctionType()) {
+ // We have a pointer to data member.
+ ContainsPointerToDataMember = true;
+ }
+ } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ return CheckForPointerToDataMember(RD);
+ }
+}
+
+void
+CGRecordLayoutBuilder::CheckForPointerToDataMember(const CXXRecordDecl *RD) {
+ // This record already contains a member pointer.
+ if (ContainsPointerToDataMember)
+ return;
+
+ // FIXME: It would be better if there was a way to explicitly compute the
+ // record layout instead of converting to a type.
+ Types.ConvertTagDeclType(RD);
+
+ const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+
+ if (Layout.containsPointerToDataMember())
+ ContainsPointerToDataMember = true;
+}
+
+CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
+ CGRecordLayoutBuilder Builder(*this);
+
+ Builder.Layout(D);
+
+ const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
+ Builder.FieldTypes,
+ Builder.Packed);
+
+ CGRecordLayout *RL =
+ new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);
+
+ // Add all the non-virtual base field numbers.
+ RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
+ Builder.LLVMNonVirtualBases.end());
+
+ // Add all the field numbers.
+ RL->FieldInfo.insert(Builder.LLVMFields.begin(),
+ Builder.LLVMFields.end());
+
+ // Add bitfield info.
+ RL->BitFields.insert(Builder.LLVMBitFields.begin(),
+ Builder.LLVMBitFields.end());
+
+ // Dump the layout, if requested.
+ if (getContext().getLangOptions().DumpRecordLayouts) {
+ llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
+ llvm::errs() << "Record: ";
+ D->dump();
+ llvm::errs() << "\nLayout: ";
+ RL->dump();
+ }
+
+#ifndef NDEBUG
+ // Verify that the computed LLVM struct size matches the AST layout size.
+ uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
+ assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
+ "Type size mismatch!");
+
+ // Verify that the LLVM and AST field offsets agree.
+ const llvm::StructType *ST =
+ dyn_cast<llvm::StructType>(RL->getLLVMType());
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
+
+ const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
+ RecordDecl::field_iterator it = D->field_begin();
+ for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
+ const FieldDecl *FD = *it;
+
+ // For non-bit-fields, just check that the LLVM struct offset matches the
+ // AST offset.
+ if (!FD->isBitField()) {
+ unsigned FieldNo = RL->getLLVMFieldNo(FD);
+ assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
+ "Invalid field offset!");
+ continue;
+ }
+
+ // Ignore unnamed bit-fields.
+ if (!FD->getDeclName())
+ continue;
+
+ const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Verify that every component access is within the structure.
+ uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
+ uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
+ assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
+ "Invalid bit-field access (out of range)!");
+ }
+ }
+#endif
+
+ return RL;
+}
+
+void CGRecordLayout::print(llvm::raw_ostream &OS) const {
+ OS << "<CGRecordLayout\n";
+ OS << " LLVMType:" << *LLVMType << "\n";
+ OS << " ContainsPointerToDataMember:" << ContainsPointerToDataMember << "\n";
+ OS << " BitFields:[\n";
+
+ // Print bit-field infos in declaration order.
+ std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
+ for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
+ it = BitFields.begin(), ie = BitFields.end();
+ it != ie; ++it) {
+ const RecordDecl *RD = it->first->getParent();
+ unsigned Index = 0;
+ for (RecordDecl::field_iterator
+ it2 = RD->field_begin(); *it2 != it->first; ++it2)
+ ++Index;
+ BFIs.push_back(std::make_pair(Index, &it->second));
+ }
+ llvm::array_pod_sort(BFIs.begin(), BFIs.end());
+ for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
+ OS.indent(4);
+ BFIs[i].second->print(OS);
+ OS << "\n";
+ }
+
+ OS << "]>\n";
+}
+
+void CGRecordLayout::dump() const {
+ print(llvm::errs());
+}
+
+void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
+ OS << "<CGBitFieldInfo";
+ OS << " Size:" << Size;
+ OS << " IsSigned:" << IsSigned << "\n";
+
+ OS.indent(4 + strlen("<CGBitFieldInfo"));
+ OS << " NumComponents:" << getNumComponents();
+ OS << " Components: [";
+ if (getNumComponents()) {
+ OS << "\n";
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ OS.indent(8);
+ OS << "<AccessInfo"
+ << " FieldIndex:" << AI.FieldIndex
+ << " FieldByteOffset:" << AI.FieldByteOffset
+ << " FieldBitStart:" << AI.FieldBitStart
+ << " AccessWidth:" << AI.AccessWidth << "\n";
+ OS.indent(8 + strlen("<AccessInfo"));
+ OS << " AccessAlignment:" << AI.AccessAlignment
+ << " TargetBitOffset:" << AI.TargetBitOffset
+ << " TargetBitWidth:" << AI.TargetBitWidth
+ << ">\n";
+ }
+ OS.indent(4);
+ }
+ OS << "]>";
+}
+
+void CGBitFieldInfo::dump() const {
+ print(llvm::errs());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
new file mode 100644
index 0000000..efde380
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -0,0 +1,1179 @@
+//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Statement Emission
+//===----------------------------------------------------------------------===//
+
+void CodeGenFunction::EmitStopPoint(const Stmt *S) {
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ if (isa<DeclStmt>(S))
+ DI->setLocation(S->getLocEnd());
+ else
+ DI->setLocation(S->getLocStart());
+ DI->EmitStopPoint(CurFn, Builder);
+ }
+}
+
+void CodeGenFunction::EmitStmt(const Stmt *S) {
+ assert(S && "Null statement?");
+
+ // Check if we can handle this without bothering to generate an
+ // insert point or debug info.
+ if (EmitSimpleStmt(S))
+ return;
+
+ // Check if we are generating unreachable code.
+ if (!HaveInsertPoint()) {
+ // If so, and the statement doesn't contain a label, then we do not need to
+ // generate actual code. This is safe because (1) the current point is
+ // unreachable, so we don't need to execute the code, and (2) we've already
+ // handled the statements which update internal data structures (like the
+ // local variable map) which could be used by subsequent statements.
+ if (!ContainsLabel(S)) {
+ // Verify that any decl statements were handled as simple, they may be in
+ // scope of subsequent reachable statements.
+ assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
+ return;
+ }
+
+ // Otherwise, make a new block to hold the code.
+ EnsureInsertPoint();
+ }
+
+ // Generate a stoppoint if we are emitting debug info.
+ EmitStopPoint(S);
+
+ switch (S->getStmtClass()) {
+ default:
+ // Must be an expression in a stmt context. Emit the value (to get
+ // side-effects) and ignore the result.
+ if (!isa<Expr>(S))
+ ErrorUnsupported(S, "statement");
+
+ EmitAnyExpr(cast<Expr>(S), 0, false, true);
+
+ // Expression emitters don't handle unreachable blocks yet, so look for one
+ // explicitly here. This handles the common case of a call to a noreturn
+ // function.
+ // We can't erase blocks with an associated cleanup size here since the
+ // memory might be reused, leaving the old cleanup info pointing at a new
+ // block.
+ if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
+ if (CurBB->empty() && CurBB->use_empty() && !BlockScopes.count(CurBB)) {
+ CurBB->eraseFromParent();
+ Builder.ClearInsertionPoint();
+ }
+ }
+ break;
+ case Stmt::IndirectGotoStmtClass:
+ EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
+
+ case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
+ case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
+ case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
+ case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
+
+ case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
+
+ case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
+ case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+
+ case Stmt::ObjCAtTryStmtClass:
+ EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
+ break;
+ case Stmt::ObjCAtCatchStmtClass:
+ assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
+ break;
+ case Stmt::ObjCAtFinallyStmtClass:
+ assert(0 && "@finally statements should be handled by EmitObjCAtTryStmt");
+ break;
+ case Stmt::ObjCAtThrowStmtClass:
+ EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
+ break;
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
+ break;
+
+ case Stmt::CXXTryStmtClass:
+ EmitCXXTryStmt(cast<CXXTryStmt>(*S));
+ break;
+ }
+}
+
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default: return false;
+ case Stmt::NullStmtClass: break;
+ case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
+ case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
+ case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
+ case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
+ case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
+ case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
+ case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
+ case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
+ }
+
+ return true;
+}
+
+/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
+/// this captures the expression result of the last sub-statement and returns it
+/// (for use by the statement expression extension).
+RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ llvm::Value *AggLoc, bool isAggVol) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
+ "LLVM IR generation of compound statement ('{}')");
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getLBracLoc());
+ DI->EmitRegionStart(CurFn, Builder);
+ }
+
+ // Keep track of the current cleanup stack depth.
+ CleanupScope Scope(*this);
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end()-GetLast; I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI) {
+ DI->setLocation(S.getRBracLoc());
+ DI->EmitRegionEnd(CurFn, Builder);
+ }
+
+ RValue RV;
+ if (!GetLast)
+ RV = RValue::get(0);
+ else {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ const Stmt *LastStmt = S.body_back();
+ while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
+ EmitLabel(*LS);
+ LastStmt = LS->getSubStmt();
+ }
+
+ EnsureInsertPoint();
+
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
+ }
+
+ return RV;
+}
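+
+// Illustrative: in a GNU statement expression whose last sub-statement is
+// labelled, e.g.
+//
+//   int y = ({ int x = f(); done: x; });
+//
+// the loop above emits the 'done' label block first and then evaluates its
+// subexpression 'x' as the value of the whole expression.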
+
+void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
+ llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
+
+  // If there is a cleanup stack, then it isn't worth trying to
+  // simplify this block (we would need to remove it from the scope map
+  // and cleanup entry).
+ if (!CleanupEntries.empty())
+ return;
+
+ // Can only simplify direct branches.
+ if (!BI || !BI->isUnconditional())
+ return;
+
+ BB->replaceAllUsesWith(BI->getSuccessor(0));
+ BI->eraseFromParent();
+ BB->eraseFromParent();
+}
+
+void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ // Fall out of the current block (if necessary).
+ EmitBranch(BB);
+
+ if (IsFinished && BB->use_empty()) {
+ delete BB;
+ return;
+ }
+
+ // If necessary, associate the block with the cleanup stack size.
+ if (!CleanupEntries.empty()) {
+ // Check if the basic block has already been inserted.
+ BlockScopeMap::iterator I = BlockScopes.find(BB);
+ if (I != BlockScopes.end()) {
+ assert(I->second == CleanupEntries.size() - 1);
+ } else {
+ BlockScopes[BB] = CleanupEntries.size() - 1;
+ CleanupEntries.back().Blocks.push_back(BB);
+ }
+ }
+
+ // Place the block after the current block, if possible, or else at
+ // the end of the function.
+ if (CurBB && CurBB->getParent())
+ CurFn->getBasicBlockList().insertAfter(CurBB, BB);
+ else
+ CurFn->getBasicBlockList().push_back(BB);
+ Builder.SetInsertPoint(BB);
+}
+
+void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
+ // Emit a branch from the current block to the target one if this
+ // was a real block. If this was just a fall-through block after a
+ // terminator, don't emit it.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (!CurBB || CurBB->getTerminator()) {
+ // If there is no insert point or the previous block is already
+ // terminated, don't touch it.
+ } else {
+ // Otherwise, create a fall-through branch.
+ Builder.CreateBr(Target);
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitLabel(const LabelStmt &S) {
+ EmitBlock(getBasicBlockForLabel(&S));
+}
+
+
+void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
+ EmitLabel(S);
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel()));
+}
+
+
+void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ // Ensure that we have an i8* for our PHI node.
+ llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
+ llvm::Type::getInt8PtrTy(VMContext),
+ "addr");
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+
+ // Get the basic block for the indirect goto.
+ llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
+
+ // The first instruction in the block has to be the PHI for the switch dest,
+ // add an entry for this branch.
+ cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
+
+ EmitBranch(IndGotoBB);
+}
+
+void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+ // C99 6.8.4.1: The first substatement is executed if the expression compares
+ // unequal to 0. The condition must be a scalar type.
+ CleanupScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
+ // Figure out which block (then or else) is executed.
+ const Stmt *Executed = S.getThen(), *Skipped = S.getElse();
+ if (Cond == -1) // Condition false?
+ std::swap(Executed, Skipped);
+
+ // If the skipped block has no labels in it, just emit the executed block.
+ // This avoids emitting dead code and simplifies the CFG substantially.
+ if (!ContainsLabel(Skipped)) {
+ if (Executed) {
+ CleanupScope ExecutedScope(*this);
+ EmitStmt(Executed);
+ }
+ return;
+ }
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
+ // the conditional branch.
+ llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
+ llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
+ llvm::BasicBlock *ElseBlock = ContBlock;
+ if (S.getElse())
+ ElseBlock = createBasicBlock("if.else");
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
+
+ // Emit the 'then' code.
+ EmitBlock(ThenBlock);
+ {
+ CleanupScope ThenScope(*this);
+ EmitStmt(S.getThen());
+ }
+ EmitBranch(ContBlock);
+
+ // Emit the 'else' code if present.
+ if (const Stmt *Else = S.getElse()) {
+ EmitBlock(ElseBlock);
+ {
+ CleanupScope ElseScope(*this);
+ EmitStmt(Else);
+ }
+ EmitBranch(ContBlock);
+ }
+
+ // Emit the continuation block for code after the if.
+ EmitBlock(ContBlock, true);
+}
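+
+// Illustrative: 'if (0) S1; else S2;' emits only S2 when S1 contains no
+// labels, but
+//
+//   if (0) { L: f(); }
+//
+// still emits the full branch structure, since a 'goto L' elsewhere can
+// jump into the otherwise-dead arm.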
+
+void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
+  // Emit the header block for the loop; inserting it creates an
+  // unconditional branch to it.
+ llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond");
+ EmitBlock(LoopHeader);
+
+ // Create an exit block for when the condition fails, create a block for the
+ // body of the loop.
+ llvm::BasicBlock *ExitBlock = createBasicBlock("while.end");
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+ llvm::BasicBlock *CleanupBlock = 0;
+ llvm::BasicBlock *EffectiveExitBlock = ExitBlock;
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
+
+ // C++ [stmt.while]p2:
+ // When the condition of a while statement is a declaration, the
+ // scope of the variable that is declared extends from its point
+ // of declaration (3.3.2) to the end of the while statement.
+ // [...]
+ // The object created in a condition is destroyed and created
+ // with each iteration of the loop.
+ CleanupScope ConditionScope(*this);
+
+ if (S.getConditionVariable()) {
+ EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+ // If this condition variable requires cleanups, create a basic
+ // block to handle those cleanups.
+ if (ConditionScope.requiresCleanups()) {
+ CleanupBlock = createBasicBlock("while.cleanup");
+ EffectiveExitBlock = CleanupBlock;
+ }
+ }
+
+ // Evaluate the conditional in the while header. C99 6.8.5.1: The
+ // evaluation of the controlling expression takes place before each
+ // execution of the loop body.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+  // while(1) is common; avoid extra exit blocks. Be sure
+  // to correctly handle break/continue though.
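+  // Illustrative example: 'while (1) { ...; break; }' gets no conditional
+  // branch in its header, but the break still reaches ExitBlock through the
+  // BreakContinueStack entry pushed above.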
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isOne())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, go to the loop body.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, EffectiveExitBlock);
+
+ // Emit the loop body.
+ {
+ CleanupScope BodyScope(*this);
+ EmitBlock(LoopBody);
+ EmitStmt(S.getBody());
+ }
+
+ BreakContinueStack.pop_back();
+
+ if (CleanupBlock) {
+ // If we have a cleanup block, jump there to perform cleanups
+ // before looping.
+ EmitBranch(CleanupBlock);
+
+ // Emit the cleanup block, performing cleanups for the condition
+ // and then jumping to either the loop header or the exit block.
+ EmitBlock(CleanupBlock);
+ ConditionScope.ForceCleanup();
+ Builder.CreateCondBr(BoolCondVal, LoopHeader, ExitBlock);
+ } else {
+ // Cycle to the condition.
+ EmitBranch(LoopHeader);
+ }
+
+ // Emit the exit block.
+ EmitBlock(ExitBlock, true);
+
+  // If we skipped emitting the conditional branch, the LoopHeader is
+  // typically just a forwarding branch; try to erase it.
+ if (!EmitBoolCondBranch && !CleanupBlock)
+ SimplifyForwardingBlocks(LoopHeader);
+}
+
+void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
+  // Emit the loop body and insert it; this creates an unconditional
+  // branch to it.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ llvm::BasicBlock *AfterDo = createBasicBlock("do.end");
+ EmitBlock(LoopBody);
+
+ llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
+
+ // Emit the body of the loop into the block.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(DoCond);
+
+ // C99 6.8.5.2: "The evaluation of the controlling expression takes place
+ // after each execution of the loop body."
+
+  // Evaluate the condition. C99 6.8.5p2: The controlling expression of an
+  // iteration statement shall have scalar type.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+  // "do {} while (0)" is common in macros; avoid extra blocks. Be sure
+  // to correctly handle break/continue though.
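+  // Illustrative example: with '#define GUARD(x) do { x; } while (0)' the
+  // condition is the constant 0, so no back-edge branch is emitted and the
+  // do.cond block can usually be folded away below.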
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isZero())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, iterate the loop.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
+
+ // Emit the exit block.
+ EmitBlock(AfterDo);
+
+  // If we skipped emitting the conditional branch, the DoCond block is
+  // typically just a forwarding branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(DoCond);
+}
+
+void CodeGenFunction::EmitForStmt(const ForStmt &S) {
+ CleanupScope ForScope(*this);
+
+ // Evaluate the first part before the loop.
+ if (S.getInit())
+ EmitStmt(S.getInit());
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+ llvm::BasicBlock *IncBlock = 0;
+ llvm::BasicBlock *CondCleanup = 0;
+ llvm::BasicBlock *EffectiveExitBlock = AfterFor;
+ EmitBlock(CondBlock);
+
+ // Create a cleanup scope for the condition variable cleanups.
+ CleanupScope ConditionScope(*this);
+
+ llvm::Value *BoolCondVal = 0;
+ if (S.getCond()) {
+ // If the for statement has a condition scope, emit the local variable
+ // declaration.
+ if (S.getConditionVariable()) {
+ EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+ if (ConditionScope.requiresCleanups()) {
+ CondCleanup = createBasicBlock("for.cond.cleanup");
+ EffectiveExitBlock = CondCleanup;
+ }
+ }
+
+ // As long as the condition is true, iterate the loop.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(BoolCondVal, ForBody, EffectiveExitBlock);
+
+ EmitBlock(ForBody);
+ } else {
+ // Treat it as a non-zero constant. Don't even create a new block for the
+ // body, just fall into it.
+ }
+
+ // If the for loop doesn't have an increment we can just use the
+ // condition as the continue block.
+ llvm::BasicBlock *ContinueBlock;
+ if (S.getInc())
+ ContinueBlock = IncBlock = createBasicBlock("for.inc");
+ else
+ ContinueBlock = CondBlock;
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
+
+ // If the condition is true, execute the body of the for stmt.
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getBegin());
+ DI->EmitRegionStart(CurFn, Builder);
+ }
+
+ {
+ // Create a separate cleanup scope for the body, in case it is not
+ // a compound statement.
+ CleanupScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ // If there is an increment, emit it next.
+ if (S.getInc()) {
+ EmitBlock(IncBlock);
+ EmitStmt(S.getInc());
+ }
+
+ BreakContinueStack.pop_back();
+
+ // Finally, branch back up to the condition for the next iteration.
+ if (CondCleanup) {
+ // Branch to the cleanup block.
+ EmitBranch(CondCleanup);
+
+ // Emit the cleanup block, which branches back to the loop body or
+ // outside of the for statement once it is done.
+ EmitBlock(CondCleanup);
+ ConditionScope.ForceCleanup();
+ Builder.CreateCondBr(BoolCondVal, CondBlock, AfterFor);
+ } else
+ EmitBranch(CondBlock);
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getEnd());
+ DI->EmitRegionEnd(CurFn, Builder);
+ }
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
+
+void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
+ if (RV.isScalar()) {
+ Builder.CreateStore(RV.getScalarVal(), ReturnValue);
+ } else if (RV.isAggregate()) {
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+ } else {
+ StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
+ }
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
+/// if the function returns void, or may be missing one if the function returns
+/// non-void. Fun stuff :).
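+/// Illustrative example: in 'void g() { return DoWork(); }', where DoWork()
+/// returns int (a GCC extension in C), the call is still emitted for its
+/// side effects and the value is discarded.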
+void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+  // Emit the result value, even if unused, to evaluate the side effects.
+ const Expr *RV = S.getRetValue();
+
+ // FIXME: Clean this up by using an LValue for ReturnTemp,
+ // EmitStoreThroughLValue, and EmitAnyExpr.
+ if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
+ !Target.useGlobalsForAutomaticVariables()) {
+ // Apply the named return value optimization for this return statement,
+ // which means doing nothing: the appropriate result has already been
+ // constructed into the NRVO variable.
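+    // Illustrative example: in 'X f() { X x; ...; return x; }', 'x' is
+    // constructed directly in the return slot, so this return statement has
+    // no copy to perform.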
+
+    // If there is an NRVO flag for this variable, set it to 1 to indicate
+ // that the cleanup code should not destroy the variable.
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) {
+ const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
+ llvm::Value *One = llvm::ConstantInt::get(BoolTy, 1);
+ Builder.CreateStore(One, NRVOFlag);
+ }
+ } else if (!ReturnValue) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (RV)
+ EmitAnyExpr(RV);
+ } else if (RV == 0) {
+ // Do nothing (return value is left uninitialized)
+ } else if (FnRetTy->isReferenceType()) {
+ // If this function returns a reference, take the address of the expression
+ // rather than the value.
+ RValue Result = EmitReferenceBindingToExpr(RV, false);
+ Builder.CreateStore(Result.getScalarVal(), ReturnValue);
+ } else if (!hasAggregateLLVMType(RV->getType())) {
+ Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+ } else if (RV->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(RV, ReturnValue, false);
+ } else {
+ EmitAggExpr(RV, ReturnValue, false);
+ }
+
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
+  // As long as debug info is modeled with instructions, we have to ensure
+  // we have an insertion point, and write the stop point here.
+ if (getDebugInfo()) {
+ EnsureInsertPoint();
+ EmitStopPoint(&S);
+ }
+
+ for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
+ I != E; ++I)
+ EmitDecl(**I);
+}
+
+void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
+ assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+/// EmitCaseStmtRange - If the case statement range is not too big, add
+/// multiple cases to the switch instruction, one for each value within the
+/// range. If the range is too big, emit an "if" condition check instead.
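+/// Illustrative example (GNU case ranges): 'case 1 ... 3:' adds the cases 1,
+/// 2 and 3 directly to the switch, while 'case 1 ... 1000:' instead emits a
+/// chained range check of the form '(cond - 1) ule 999'.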
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+ assert(S.getRHS() && "Expected RHS value in CaseStmt");
+
+ llvm::APSInt LHS = S.getLHS()->EvaluateAsInt(getContext());
+ llvm::APSInt RHS = S.getRHS()->EvaluateAsInt(getContext());
+
+ // Emit the code for this case. We do this first to make sure it is
+ // properly chained from our predecessor before generating the
+ // switch machinery to enter this block.
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ EmitStmt(S.getSubStmt());
+
+ // If range is empty, do nothing.
+ if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
+ return;
+
+ llvm::APInt Range = RHS - LHS;
+ // FIXME: parameters such as this should not be hardcoded.
+ if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
+ // Range is small enough to add multiple switch instruction cases.
+ for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
+ SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, LHS), CaseDest);
+ LHS++;
+ }
+ return;
+ }
+
+ // The range is too big. Emit "if" condition into a new block,
+ // making sure to save and restore the current insertion point.
+ llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
+
+ // Push this test onto the chain of range checks (which terminates
+ // in the default basic block). The switch's default will be changed
+ // to the top of this chain after switch emission is complete.
+ llvm::BasicBlock *FalseDest = CaseRangeBlock;
+ CaseRangeBlock = createBasicBlock("sw.caserange");
+
+ CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+ Builder.SetInsertPoint(CaseRangeBlock);
+
+ // Emit range check.
+ llvm::Value *Diff =
+ Builder.CreateSub(SwitchInsn->getCondition(),
+ llvm::ConstantInt::get(VMContext, LHS), "tmp");
+ llvm::Value *Cond =
+ Builder.CreateICmpULE(Diff,
+ llvm::ConstantInt::get(VMContext, Range), "tmp");
+ Builder.CreateCondBr(Cond, CaseDest, FalseDest);
+
+ // Restore the appropriate insertion point.
+ if (RestoreBB)
+ Builder.SetInsertPoint(RestoreBB);
+ else
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ if (S.getRHS()) {
+ EmitCaseStmtRange(S);
+ return;
+ }
+
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
+ SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+
+ // Recursively emitting the statement is acceptable, but is not wonderful for
+ // code where we have many case statements nested together, i.e.:
+ // case 1:
+ // case 2:
+ // case 3: etc.
+ // Handling this recursively will create a new block for each case statement
+ // that falls through to the next case which is IR intensive. It also causes
+ // deep recursion which can run into stack depth limitations. Handle
+ // sequential non-range case statements specially.
+ const CaseStmt *CurCase = &S;
+ const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
+
+  // Iteratively add consecutive cases to this switch stmt.
+ while (NextCase && NextCase->getRHS() == 0) {
+ CurCase = NextCase;
+ CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
+ SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+
+ NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
+ }
+
+ // Normal default recursion for non-cases.
+ EmitStmt(CurCase->getSubStmt());
+}
+
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+ llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
+ assert(DefaultBlock->empty() &&
+ "EmitDefaultStmt: Default block already defined?");
+ EmitBlock(DefaultBlock);
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
+ CleanupScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+ llvm::Value *CondV = EmitScalarExpr(S.getCond());
+
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
+  // Create a basic block for the code that comes after the switch
+  // statement. We also need to create the default block now so that
+  // explicit case range tests have a place to jump to on failure.
+ llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog");
+ llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
+ SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+ CaseRangeBlock = DefaultBlock;
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+  // All break statements jump to NextBlock. If BreakContinueStack is
+  // non-empty then reuse the last ContinueBlock.
+ llvm::BasicBlock *ContinueBlock = 0;
+ if (!BreakContinueStack.empty())
+ ContinueBlock = BreakContinueStack.back().ContinueBlock;
+
+  // Ensure any VLAs created between there and here are undone.
+ BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock));
+
+ // Emit switch body.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Update the default block in case explicit case range tests have
+ // been chained on top.
+ SwitchInsn->setSuccessor(0, CaseRangeBlock);
+
+ // If a default was never emitted then reroute any jumps to it and
+ // discard.
+ if (!DefaultBlock->getParent()) {
+ DefaultBlock->replaceAllUsesWith(NextBlock);
+ delete DefaultBlock;
+ }
+
+ // Emit continuation.
+ EmitBlock(NextBlock, true);
+
+ SwitchInsn = SavedSwitchInsn;
+ CaseRangeBlock = SavedCRBlock;
+}
+
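+// SimplifyConstraint - Convert a GCC inline-asm constraint string into the
+// form the LLVM backend expects. Illustrative examples: '?g' becomes "imr"
+// (the '?' modifier is dropped and 'g' expands to immediate/memory/register),
+// and a symbolic reference such as '[val]' is resolved to the decimal index
+// of the named operand.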
+static std::string
+SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
+ llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
+ std::string Result;
+
+ while (*Constraint) {
+ switch (*Constraint) {
+ default:
+ Result += Target.convertConstraint(*Constraint);
+ break;
+    // Ignore these constraint modifiers.
+ case '*':
+ case '?':
+ case '!':
+ break;
+ case 'g':
+ Result += "imr";
+ break;
+ case '[': {
+ assert(OutCons &&
+ "Must pass output names to constraints with a symbolic name");
+ unsigned Index;
+ bool result = Target.resolveSymbolicName(Constraint,
+ &(*OutCons)[0],
+ OutCons->size(), Index);
+      assert(result && "Could not resolve symbolic name"); (void)result;
+ Result += llvm::utostr(Index);
+ break;
+ }
+ }
+
+ Constraint++;
+ }
+
+ return Result;
+}
+
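+// EmitAsmInput - Lower a single inline-asm input operand. Illustrative
+// examples: a scalar under 'r' is passed by value; an operand that only
+// allows memory is passed by address and its constraint gets a '*' prefix;
+// a power-of-two-sized aggregate of at most 64 bits under 'r' is loaded
+// through a same-sized integer so it can be kept in a register.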
+llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
+ llvm::Value *Arg;
+ if (Info.allowsRegister() || !Info.allowsMemory()) {
+ if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType())) {
+ Arg = EmitScalarExpr(InputExpr);
+ } else {
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+
+ const llvm::Type *Ty = ConvertType(InputExpr->getType());
+ uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+ if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+ Ty = llvm::IntegerType::get(VMContext, Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
+ } else {
+ Arg = Dest.getAddress();
+ ConstraintStr += '*';
+ }
+ }
+ } else {
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+ Arg = Dest.getAddress();
+ ConstraintStr += '*';
+ }
+
+ return Arg;
+}
+
+void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Analyze the asm string to decompose it into its pieces. We know that Sema
+ // has already done this, so it is guaranteed to be successful.
+ llvm::SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
+ unsigned DiagOffs;
+ S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
+
+ // Assemble the pieces into the final asm string.
+ std::string AsmString;
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ if (Pieces[i].isString())
+ AsmString += Pieces[i].getString();
+ else if (Pieces[i].getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ else
+ AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+ Pieces[i].getModifier() + '}';
+ }
+
+ // Get all the output and input constraints together.
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
+ S.getOutputName(i));
+ bool IsValid = Target.validateOutputConstraint(Info); (void)IsValid;
+ assert(IsValid && "Failed to parse output constraint");
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
+ S.getInputName(i));
+ bool IsValid = Target.validateInputConstraint(OutputConstraintInfos.data(),
+ S.getNumOutputs(), Info);
+ assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
+ InputConstraintInfos.push_back(Info);
+ }
+
+ std::string Constraints;
+
+ std::vector<LValue> ResultRegDests;
+ std::vector<QualType> ResultRegQualTys;
+ std::vector<const llvm::Type *> ResultRegTypes;
+ std::vector<const llvm::Type *> ResultTruncRegTypes;
+ std::vector<const llvm::Type*> ArgTypes;
+ std::vector<llvm::Value*> Args;
+
+ // Keep track of inout constraints.
+ std::string InOutConstraints;
+ std::vector<llvm::Value*> InOutArgs;
+ std::vector<const llvm::Type*> InOutArgTypes;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+ // Simplify the output constraint.
+ std::string OutputConstraint(S.getOutputConstraint(i));
+ OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
+
+ const Expr *OutExpr = S.getOutputExpr(i);
+ OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+
+ LValue Dest = EmitLValue(OutExpr);
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // If this is a register output, then make the inline asm return it
+ // by-value. If this is a memory result, return the value by-reference.
+ if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
+ Constraints += "=" + OutputConstraint;
+ ResultRegQualTys.push_back(OutExpr->getType());
+ ResultRegDests.push_back(Dest);
+ ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+ ResultTruncRegTypes.push_back(ResultRegTypes.back());
+
+ // If this output is tied to an input, and if the input is larger, then
+ // we need to set the actual result type of the inline asm node to be the
+ // same as the input type.
+ if (Info.hasMatchingInput()) {
+ unsigned InputNo;
+ for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
+ TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
+ if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
+ break;
+ }
+ assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
+
+ QualType InputTy = S.getInputExpr(InputNo)->getType();
+ QualType OutputType = OutExpr->getType();
+
+ uint64_t InputSize = getContext().getTypeSize(InputTy);
+ if (getContext().getTypeSize(OutputType) < InputSize) {
+ // Form the asm to return the value as a larger integer or fp type.
+ ResultRegTypes.back() = ConvertType(InputTy);
+ }
+ }
+ } else {
+ ArgTypes.push_back(Dest.getAddress()->getType());
+ Args.push_back(Dest.getAddress());
+ Constraints += "=*";
+ Constraints += OutputConstraint;
+ }
+
+ if (Info.isReadWrite()) {
+ InOutConstraints += ',';
+
+ const Expr *InputExpr = S.getOutputExpr(i);
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, InOutConstraints);
+
+ if (Info.allowsRegister())
+ InOutConstraints += llvm::utostr(i);
+ else
+ InOutConstraints += OutputConstraint;
+
+ InOutArgTypes.push_back(Arg->getType());
+ InOutArgs.push_back(Arg);
+ }
+ }
+
+ unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ const Expr *InputExpr = S.getInputExpr(i);
+
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // Simplify the input constraint.
+ std::string InputConstraint(S.getInputConstraint(i));
+ InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
+ &OutputConstraintInfos);
+
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+
+ // If this input argument is tied to a larger output result, extend the
+ // input to be the same size as the output. The LLVM backend wants to see
+ // the input and output of a matching constraint be the same size. Note
+ // that GCC does not define what the top bits are here. We use zext because
+ // that is usually cheaper, but LLVM IR should really get an anyext someday.
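+    // Illustrative example:
+    //   long long out; int in;
+    //   asm("..." : "=r"(out) : "0"(in));
+    // Here 'in' is zero-extended to the (assumed 64-bit) type of 'out' so
+    // both sides of the matching constraint have the same size.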
+ if (Info.hasTiedOperand()) {
+ unsigned Output = Info.getTiedOperand();
+ QualType OutputType = S.getOutputExpr(Output)->getType();
+ QualType InputTy = InputExpr->getType();
+
+ if (getContext().getTypeSize(OutputType) >
+ getContext().getTypeSize(InputTy)) {
+ // Use ptrtoint as appropriate so that we can do our extension.
+ if (isa<llvm::PointerType>(Arg->getType()))
+ Arg = Builder.CreatePtrToInt(Arg,
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth));
+ const llvm::Type *OutputTy = ConvertType(OutputType);
+ if (isa<llvm::IntegerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, OutputTy);
+ else
+ Arg = Builder.CreateFPExt(Arg, OutputTy);
+ }
+ }
+
+ ArgTypes.push_back(Arg->getType());
+ Args.push_back(Arg);
+ Constraints += InputConstraint;
+ }
+
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
+ // Clobbers
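+  // e.g. (illustrative) the clobber list '"eax", "memory"' is appended to
+  // the constraint string as '~{eax},~{memory}'.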
+ for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+ llvm::StringRef Clobber = S.getClobber(i)->getString();
+
+ Clobber = Target.getNormalizedGCCRegisterName(Clobber);
+
+ if (i != 0 || NumConstraints != 0)
+ Constraints += ',';
+
+ Constraints += "~{";
+ Constraints += Clobber;
+ Constraints += '}';
+ }
+
+ // Add machine specific clobbers
+ std::string MachineClobbers = Target.getClobbers();
+ if (!MachineClobbers.empty()) {
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
+
+ const llvm::Type *ResultType;
+ if (ResultRegTypes.empty())
+ ResultType = llvm::Type::getVoidTy(VMContext);
+ else if (ResultRegTypes.size() == 1)
+ ResultType = ResultRegTypes[0];
+ else
+ ResultType = llvm::StructType::get(VMContext, ResultRegTypes);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ResultType, ArgTypes, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, AsmString, Constraints,
+ S.isVolatile() || S.getNumOutputs() == 0);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
+ Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+
+ // Slap the source location of the inline asm into a !srcloc metadata on the
+ // call.
+ unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
+ llvm::Value *LocIDC =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LocID);
+ Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
+
+ // Extract all of the register value results from the asm.
+ std::vector<llvm::Value*> RegResults;
+ if (ResultRegTypes.size() == 1) {
+ RegResults.push_back(Result);
+ } else {
+ for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
+ RegResults.push_back(Tmp);
+ }
+ }
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+ const llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+      // Truncate the integer result to the right size; note that TruncTy
+      // can be a pointer.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext,
+ (unsigned)ResSize));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(Tmp, llvm::IntegerType::get(VMContext,
+ (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ }
+ }
+
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
+ ResultRegQualTys[i]);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
new file mode 100644
index 0000000..a8f0467
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
@@ -0,0 +1,160 @@
+//===--- CGTemporaries.cpp - Emit LLVM Code for C++ temporaries -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of temporaries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
+void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
+ llvm::Value *Ptr) {
+ assert((LiveTemporaries.empty() ||
+ LiveTemporaries.back().ThisPtr != Ptr ||
+ ConditionalBranchLevel) &&
+ "Pushed the same temporary twice; AST is likely wrong");
+ llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
+
+ llvm::AllocaInst *CondPtr = 0;
+
+  // Check if the temporary needs to be conditional. If so, we create a
+  // condition boolean, initialize it to false, and then set it to true
+  // once control actually reaches the creation of the temporary.
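+  // Illustrative example: in 'b ? T().use() : 0', the T temporary is created
+  // on only one arm of the conditional, so the flag records at run time
+  // whether its destructor must actually run.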
+ if (ConditionalBranchLevel != 0) {
+ CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond");
+
+ // Initialize it to false. This initialization takes place right after
+ // the alloca insert point.
+ InitTempAlloca(CondPtr, llvm::ConstantInt::getFalse(VMContext));
+
+ // Now set it to true.
+ Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
+ }
+
+ LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
+ CondPtr));
+
+ PushCleanupBlock(DtorBlock);
+
+ if (Exceptions) {
+ const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
+ llvm::BasicBlock *CondEnd = 0;
+
+ EHCleanupBlock Cleanup(*this);
+
+ // If this is a conditional temporary, we need to check the condition
+ // boolean and only call the destructor if it's true.
+ if (Info.CondPtr) {
+ llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
+ CondEnd = createBasicBlock("cond.dtor.end");
+
+ llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
+ Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+ EmitBlock(CondBlock);
+ }
+
+ EmitCXXDestructorCall(Info.Temporary->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ Info.ThisPtr);
+
+ if (CondEnd) {
+      // Reset the condition to false.
+ Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
+ EmitBlock(CondEnd);
+ }
+ }
+}
+
+void CodeGenFunction::PopCXXTemporary() {
+ const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
+
+ CleanupBlockInfo CleanupInfo = PopCleanupBlock();
+ assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
+ "Cleanup block mismatch!");
+ assert(!CleanupInfo.SwitchBlock &&
+ "Should not have a switch block for temporary cleanup!");
+ assert(!CleanupInfo.EndBlock &&
+ "Should not have an end block for temporary cleanup!");
+
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+ if (CurBB && !CurBB->getTerminator() &&
+ Info.DtorBlock->getNumUses() == 0) {
+ CurBB->getInstList().splice(CurBB->end(), Info.DtorBlock->getInstList());
+ delete Info.DtorBlock;
+ } else
+ EmitBlock(Info.DtorBlock);
+
+ llvm::BasicBlock *CondEnd = 0;
+
+ // If this is a conditional temporary, we need to check the condition
+ // boolean and only call the destructor if it's true.
+ if (Info.CondPtr) {
+ llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
+ CondEnd = createBasicBlock("cond.dtor.end");
+
+ llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
+ Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+ EmitBlock(CondBlock);
+ }
+
+ EmitCXXDestructorCall(Info.Temporary->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false, Info.ThisPtr);
+
+ if (CondEnd) {
+    // Reset the condition to false.
+ Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
+ EmitBlock(CondEnd);
+ }
+
+ LiveTemporaries.pop_back();
+}
+
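+/// EmitCXXExprWithTemporaries - Emit an expression whose evaluation creates
+/// C++ temporaries, destroying them at the end of the full-expression.
+/// Illustrative example: in 'std::string("a") + "b"', the temporary strings
+/// are destroyed once the enclosing full-expression has been evaluated.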
+RValue
+CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+ llvm::Value *AggLoc,
+ bool IsAggLocVolatile,
+ bool IsInitializer) {
+ // Keep track of the current cleanup stack depth.
+ size_t CleanupStackDepth = CleanupEntries.size();
+ (void) CleanupStackDepth;
+
+ RValue RV;
+
+ {
+ CXXTemporariesCleanupScope Scope(*this);
+
+ RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+ /*IgnoreResult=*/false, IsInitializer);
+ }
+ assert(CleanupEntries.size() == CleanupStackDepth &&
+ "Cleanup size mismatch!");
+
+ return RV;
+}
+
+LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
+ const CXXExprWithTemporaries *E) {
+ // Keep track of the current cleanup stack depth.
+ size_t CleanupStackDepth = CleanupEntries.size();
+ (void) CleanupStackDepth;
+
+ unsigned OldNumLiveTemporaries = LiveTemporaries.size();
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Pop temporaries.
+ while (LiveTemporaries.size() > OldNumLiveTemporaries)
+ PopCXXTemporary();
+
+ assert(CleanupEntries.size() == CleanupStackDepth &&
+ "Cleanup size mismatch!");
+
+ return LV;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
new file mode 100644
index 0000000..61c7423
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
@@ -0,0 +1,487 @@
+//===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of VTTs (vtable tables).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "clang/AST/RecordLayout.h"
+using namespace clang;
+using namespace CodeGen;
+
+#define D1(x)
+
+namespace {
+
+/// VTT builder - Class for building VTT layout information.
+class VTTBuilder {
+
+ CodeGenModule &CGM;
+
+  /// MostDerivedClass - The most derived class for which we're building
+  /// this VTT.
+ const CXXRecordDecl *MostDerivedClass;
+
+ typedef llvm::SmallVector<llvm::Constant *, 64> VTTComponentsVectorTy;
+
+ /// VTTComponents - The VTT components.
+ VTTComponentsVectorTy VTTComponents;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
+
+ /// SubVTTIndicies - The sub-VTT indices for the bases of the most derived
+ /// class.
+ llvm::DenseMap<BaseSubobject, uint64_t> SubVTTIndicies;
+
+ /// SecondaryVirtualPointerIndices - The secondary virtual pointer indices of
+ /// all subobjects of the most derived class.
+ llvm::DenseMap<BaseSubobject, uint64_t> SecondaryVirtualPointerIndices;
+
+ /// GenerateDefinition - Whether the VTT builder should generate LLVM IR for
+ /// the VTT.
+ bool GenerateDefinition;
+
+ /// GetAddrOfVTable - Returns the address of the vtable for the base class in
+ /// the given vtable class.
+ ///
+ /// \param AddressPoints - If the returned vtable is a construction vtable,
+ /// this will hold the address points for it.
+ llvm::Constant *GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
+ AddressPointsMapTy& AddressPoints);
+
+ /// AddVTablePointer - Add a vtable pointer to the VTT currently being built.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints);
+
+ /// LayoutSecondaryVTTs - Lay out the secondary VTTs of the given base
+ /// subobject.
+ void LayoutSecondaryVTTs(BaseSubobject Base);
+
+ /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
+ /// for the given base subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
+ /// for the given base subobject.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const AddressPointsMapTy& AddressPoints);
+
+ /// LayoutVirtualVTTs - Lay out the VTTs for the virtual base classes of the
+ /// given record decl.
+ void LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTT - Will lay out the VTT for the given subobject, including any
+ /// secondary VTTs, secondary virtual pointers and virtual VTTs.
+ void LayoutVTT(BaseSubobject Base, bool BaseIsVirtual);
+
+public:
+ VTTBuilder(CodeGenModule &CGM, const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition);
+
+  /// getVTTComponents - Returns a reference to the VTT components.
+ const VTTComponentsVectorTy &getVTTComponents() const {
+ return VTTComponents;
+ }
+
+ /// getSubVTTIndicies - Returns a reference to the sub-VTT indices.
+ const llvm::DenseMap<BaseSubobject, uint64_t> &getSubVTTIndicies() const {
+ return SubVTTIndicies;
+ }
+
+ /// getSecondaryVirtualPointerIndices - Returns a reference to the secondary
+ /// virtual pointer indices.
+ const llvm::DenseMap<BaseSubobject, uint64_t> &
+ getSecondaryVirtualPointerIndices() const {
+ return SecondaryVirtualPointerIndices;
+ }
+
+};
+
+VTTBuilder::VTTBuilder(CodeGenModule &CGM,
+ const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition)
+ : CGM(CGM), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassLayout(CGM.getContext().getASTRecordLayout(MostDerivedClass)),
+ GenerateDefinition(GenerateDefinition) {
+
+ // Lay out this VTT.
+ LayoutVTT(BaseSubobject(MostDerivedClass, 0), /*BaseIsVirtual=*/false);
+}
+
+llvm::Constant *
+VTTBuilder::GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
+ AddressPointsMapTy& AddressPoints) {
+ if (!GenerateDefinition)
+ return 0;
+
+ if (Base.getBase() == MostDerivedClass) {
+ assert(Base.getBaseOffset() == 0 &&
+ "Most derived class vtable must have a zero offset!");
+ // This is a regular vtable.
+ return CGM.getVTables().GetAddrOfVTable(MostDerivedClass);
+ }
+
+ return CGM.getVTables().GenerateConstructionVTable(MostDerivedClass,
+ Base, BaseIsVirtual,
+ AddressPoints);
+}
+
+void VTTBuilder::AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints) {
+ // Store the vtable pointer index if we're generating the primary VTT.
+ if (VTableClass == MostDerivedClass) {
+ assert(!SecondaryVirtualPointerIndices.count(Base) &&
+ "A virtual pointer index already exists for this base subobject!");
+ SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
+ }
+
+ if (!GenerateDefinition) {
+ VTTComponents.push_back(0);
+ return;
+ }
+
+ uint64_t AddressPoint;
+ if (VTableClass != MostDerivedClass) {
+ // The vtable is a construction vtable, look in the construction vtable
+ // address points.
+ AddressPoint = AddressPoints.lookup(Base);
+ assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
+ } else {
+ // Just get the address point for the regular vtable.
+ AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
+ assert(AddressPoint != 0 && "Did not find vtable address point!");
+ }
+
+
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0),
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()),
+ AddressPoint)
+ };
+
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs, 2);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+
+ VTTComponents.push_back(Init);
+}
+
+void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+    // Don't lay out virtual bases.
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ uint64_t BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ // Layout the VTT for this base.
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints,
+ VisitedVirtualBasesSetTy &VBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+  // We're only interested in bases that either have virtual bases
+  // themselves or are morally virtual.
+ if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers are present for all bases with either
+ // virtual bases or virtual function declarations overridden along a
+ // virtual path.
+ //
+ // If the base class is not dynamic, we don't want to add it, nor any
+ // of its base classes.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+ bool BaseDeclIsNonVirtualPrimaryBase = false;
+ uint64_t BaseOffset;
+ if (I->isVirtual()) {
+ // Ignore virtual bases that we've already visited.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseDeclIsMorallyVirtual = true;
+ } else {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+
+ if (!Layout.getPrimaryBaseWasVirtual() &&
+ Layout.getPrimaryBase() == BaseDecl)
+ BaseDeclIsNonVirtualPrimaryBase = true;
+ }
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers: for each base class X which (a) has virtual
+ // bases or is reachable along a virtual path from D, and (b) is not a
+ // non-virtual primary base, the address of the virtual table for X-in-D
+ // or an appropriate construction virtual table.
+ if (!BaseDeclIsNonVirtualPrimaryBase &&
+ (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
+ // Add the vtable pointer.
+ AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTable, VTableClass,
+ AddressPoints);
+ }
+
+ // And lay out the secondary virtual pointers for the base class.
+ LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
+ BaseDeclIsMorallyVirtual, VTable,
+ VTableClass, AddressPoints, VBases);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const AddressPointsMapTy& AddressPoints) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
+ VTable, Base.getBase(), AddressPoints, VBases);
+}
+
+void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base.
+ if (I->isVirtual()) {
+ // Check if we've seen this base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ uint64_t BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
+ }
+
+    // We only need to lay out virtual VTTs for this base if it actually
+    // has virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVirtualVTTs(BaseDecl, VBases);
+ }
+}
+
+void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Itanium C++ ABI 2.6.2:
+ // An array of virtual table addresses, called the VTT, is declared for
+ // each class type that has indirect or direct virtual base classes.
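+  // Illustrative example: given
+  //   struct V { virtual void f(); };
+  //   struct A : virtual V { };
+  //   struct B : A { };
+  // both A and B have a virtual base and so get a VTT; their base-object
+  // constructors use it to install construction-vtable pointers while the V
+  // subobject is being set up.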
+ if (RD->getNumVBases() == 0)
+ return;
+
+ bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
+
+ if (!IsPrimaryVTT) {
+ // Remember the sub-VTT index.
+ SubVTTIndicies[Base] = VTTComponents.size();
+ }
+
+ AddressPointsMapTy AddressPoints;
+ llvm::Constant *VTable = GetAddrOfVTable(Base, BaseIsVirtual, AddressPoints);
+
+ // Add the primary vtable pointer.
+ AddVTablePointer(Base, VTable, RD, AddressPoints);
+
+ // Add the secondary VTTs.
+ LayoutSecondaryVTTs(Base);
+
+ // Add the secondary virtual pointers.
+ LayoutSecondaryVirtualPointers(Base, VTable, AddressPoints);
+
+ // If this is the primary VTT, we want to lay out virtual VTTs as well.
+ if (IsPrimaryVTT) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutVirtualVTTs(Base.getBase(), VBases);
+ }
+}
+
+}
+
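+/// GenerateVTT - Emit the VTT for RD as an array of i8* vtable addresses
+/// with the given linkage, or return 0 if RD has no virtual bases and thus
+/// needs no VTT. When GenerateDefinition is false, only a declaration is
+/// produced.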
+llvm::GlobalVariable *
+CodeGenVTables::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+ bool GenerateDefinition,
+ const CXXRecordDecl *RD) {
+ // Only classes that have virtual bases need a VTT.
+ if (RD->getNumVBases() == 0)
+ return 0;
+
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXVTT(RD, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ D1(printf("vtt %s\n", RD->getNameAsCString()));
+
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+ if (GV == 0 || GV->isDeclaration()) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+
+ VTTBuilder Builder(CGM, RD, GenerateDefinition);
+
+ const llvm::ArrayType *Type =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+
+ llvm::Constant *Init = 0;
+ if (GenerateDefinition)
+ Init = llvm::ConstantArray::get(Type, Builder.getVTTComponents().data(),
+ Builder.getVTTComponents().size());
+
+ llvm::GlobalVariable *OldGV = GV;
+ GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
+ Linkage, Init, Name);
+ CGM.setGlobalVisibility(GV, RD);
+
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+ }
+
+ return GV;
+}
+
+llvm::GlobalVariable *CodeGenVTables::getVTT(const CXXRecordDecl *RD) {
+ return GenerateVTT(llvm::GlobalValue::ExternalLinkage,
+ /*GenerateDefinition=*/false, RD);
+}
+
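+/// needsVTTParameter - Return whether the given global decl needs a hidden
+/// VTT parameter. Under the Itanium ABI this is the case for base-object
+/// constructors and destructors of classes with virtual bases, which are
+/// passed the (sub-)VTT so they can install construction vtable pointers.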
+bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  // If we don't have any virtual bases, just return early.
+ if (!MD->getParent()->getNumVBases())
+ return false;
+
+ // Check if we have a base constructor.
+ if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+ return true;
+
+ // Check if we have a base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return true;
+
+ return false;
+}
+
+uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
+
+ SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
+ if (I != SubVTTIndicies.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
+
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSubVTTIndicies().begin(),
+ E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ // Insert all indices.
+ BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);
+
+ SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
+ }
+
+ I = SubVTTIndicies.find(ClassSubobjectPair);
+ assert(I != SubVTTIndicies.end() && "Did not find index!");
+
+ return I->second;
+}
+
+uint64_t
+CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ SecondaryVirtualPointerIndicesMapTy::iterator I =
+ SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+
+ if (I != SecondaryVirtualPointerIndices.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
+
+ // Insert all secondary vpointer indices.
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSecondaryVirtualPointerIndices().begin(),
+ E = Builder.getSecondaryVirtualPointerIndices().end(); I != E; ++I) {
+ std::pair<const CXXRecordDecl *, BaseSubobject> Pair =
+ std::make_pair(RD, I->first);
+
+ SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second));
+ }
+
+ I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+ assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!");
+
+ return I->second;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
new file mode 100644
index 0000000..0f023e6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -0,0 +1,3133 @@
+//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+/// BaseOffset - Represents an offset from a derived class to a direct or
+/// indirect base class.
+struct BaseOffset {
+ /// DerivedClass - The derived class.
+ const CXXRecordDecl *DerivedClass;
+
+ /// VirtualBase - If the path from the derived class to the base class
+ /// involves a virtual base class, this holds its declaration.
+ const CXXRecordDecl *VirtualBase;
+
+ /// NonVirtualOffset - The offset from the derived class to the base class.
+ /// (Or the offset from the virtual base class to the base class, if the
+ /// path from the derived class to the base class involves a virtual base
+ /// class.
+ int64_t NonVirtualOffset;
+
+ BaseOffset() : DerivedClass(0), VirtualBase(0), NonVirtualOffset(0) { }
+ BaseOffset(const CXXRecordDecl *DerivedClass,
+ const CXXRecordDecl *VirtualBase, int64_t NonVirtualOffset)
+ : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
+ NonVirtualOffset(NonVirtualOffset) { }
+
+ bool isEmpty() const { return !NonVirtualOffset && !VirtualBase; }
+};
+
+/// FinalOverriders - Contains the final overrider member functions for all
+/// member functions in the base subobjects of a class.
+class FinalOverriders {
+public:
+ /// OverriderInfo - Information about a final overrider.
+ struct OverriderInfo {
+ /// Method - The method decl of the overrider.
+ const CXXMethodDecl *Method;
+
+ /// Offset - the base offset of the overrider in the layout class.
+ uint64_t Offset;
+
+ OverriderInfo() : Method(0), Offset(0) { }
+ };
+
+private:
+ /// MostDerivedClass - The most derived class for which the final overriders
+ /// are stored.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building final overriders for a
+ /// construction vtable, this holds the offset from the layout class to the
+ /// most derived class.
+ const uint64_t MostDerivedClassOffset;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+  /// different from the most derived class if the final overriders are for a
+ /// construction vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ ASTContext &Context;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ /// BaseSubobjectMethodPairTy - Uniquely identifies a member function
+ /// in a base subobject.
+ typedef std::pair<BaseSubobject, const CXXMethodDecl *>
+ BaseSubobjectMethodPairTy;
+
+ typedef llvm::DenseMap<BaseSubobjectMethodPairTy,
+ OverriderInfo> OverridersMapTy;
+
+ /// OverridersMap - The final overriders for all virtual member functions of
+ /// all the base subobjects of the most derived class.
+ OverridersMapTy OverridersMap;
+
+ /// VisitedVirtualBases - A set of all the visited virtual bases, used to
+ /// avoid visiting virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ typedef llvm::DenseMap<BaseSubobjectMethodPairTy, BaseOffset>
+ AdjustmentOffsetsMapTy;
+
+ /// ReturnAdjustments - Holds return adjustments for all the overriders that
+ /// need to perform return value adjustments.
+ AdjustmentOffsetsMapTy ReturnAdjustments;
+
+ // FIXME: We might be able to get away with making this a SmallSet.
+ typedef llvm::SmallSetVector<uint64_t, 2> OffsetSetVectorTy;
+
+ /// SubobjectOffsetsMapTy - This map is used for keeping track of all the
+ /// base subobject offsets that a single class declaration might refer to.
+ ///
+ /// For example, in:
+ ///
+ /// struct A { virtual void f(); };
+ /// struct B1 : A { };
+ /// struct B2 : A { };
+ /// struct C : B1, B2 { virtual void f(); };
+ ///
+ /// when we determine that C::f() overrides A::f(), we need to update the
+ /// overriders map for both A-in-B1 and A-in-B2 and the subobject offsets map
+ /// will have the subobject offsets for both A copies.
+ typedef llvm::DenseMap<const CXXRecordDecl *, OffsetSetVectorTy>
+ SubobjectOffsetsMapTy;
+
+ /// ComputeFinalOverriders - Compute the final overriders for a given base
+ /// subobject (and all its direct and indirect bases).
+ void ComputeFinalOverriders(BaseSubobject Base,
+ bool BaseSubobjectIsVisitedVBase,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetsMapTy &Offsets);
+
+ /// AddOverriders - Add the final overriders for this base subobject to the
+ /// map of final overriders.
+ void AddOverriders(BaseSubobject Base, uint64_t OffsetInLayoutClass,
+ SubobjectOffsetsMapTy &Offsets);
+
+ /// PropagateOverrider - Propagate the NewMD overrider to all the functions
+ /// that OldMD overrides. For example, if we have:
+ ///
+ /// struct A { virtual void f(); };
+ /// struct B : A { virtual void f(); };
+ /// struct C : B { virtual void f(); };
+ ///
+ /// and we want to override B::f with C::f, we also need to override A::f with
+ /// C::f.
+ void PropagateOverrider(const CXXMethodDecl *OldMD,
+ BaseSubobject NewBase,
+ uint64_t OverriderOffsetInLayoutClass,
+ const CXXMethodDecl *NewMD,
+ SubobjectOffsetsMapTy &Offsets);
+
+ static void MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets,
+ SubobjectOffsetsMapTy &Offsets);
+
+public:
+ FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ uint64_t MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass);
+
+ /// getOverrider - Get the final overrider for the given method declaration in
+ /// the given base subobject.
+ OverriderInfo getOverrider(BaseSubobject Base,
+ const CXXMethodDecl *MD) const {
+ assert(OverridersMap.count(std::make_pair(Base, MD)) &&
+ "Did not find overrider!");
+
+ return OverridersMap.lookup(std::make_pair(Base, MD));
+ }
+
+ /// getReturnAdjustmentOffset - Get the return adjustment offset for the
+ /// method decl in the given base subobject. Returns an empty base offset if
+ /// no adjustment is needed.
+ BaseOffset getReturnAdjustmentOffset(BaseSubobject Base,
+ const CXXMethodDecl *MD) const {
+ return ReturnAdjustments.lookup(std::make_pair(Base, MD));
+ }
+
+ /// dump - dump the final overriders.
+ void dump() {
+ assert(VisitedVirtualBases.empty() &&
+ "Visited virtual bases aren't empty!");
+ dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0));
+ VisitedVirtualBases.clear();
+ }
+
+ /// dump - dump the final overriders for a base subobject, and all its direct
+ /// and indirect base subobjects.
+ void dump(llvm::raw_ostream &Out, BaseSubobject Base);
+};
+
+#define DUMP_OVERRIDERS 0
+
+FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ uint64_t MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass)
+ : MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()),
+ MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
+
+ // Compute the final overriders.
+ SubobjectOffsetsMapTy Offsets;
+ ComputeFinalOverriders(BaseSubobject(MostDerivedClass, 0),
+ /*BaseSubobjectIsVisitedVBase=*/false,
+ MostDerivedClassOffset, Offsets);
+ VisitedVirtualBases.clear();
+
+#if DUMP_OVERRIDERS
+ // And dump them (for now).
+ dump();
+
+ // Also dump the base offsets (for now).
+ for (SubobjectOffsetsMapTy::const_iterator I = Offsets.begin(),
+ E = Offsets.end(); I != E; ++I) {
+ const OffsetSetVectorTy& OffsetSetVector = I->second;
+
+ llvm::errs() << "Base offsets for ";
+ llvm::errs() << I->first->getQualifiedNameAsString() << '\n';
+
+ for (unsigned I = 0, E = OffsetSetVector.size(); I != E; ++I)
+ llvm::errs() << " " << I << " - " << OffsetSetVector[I] / 8 << '\n';
+ }
+#endif
+}
+
+void FinalOverriders::AddOverriders(BaseSubobject Base,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetsMapTy &Offsets) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ // First, propagate the overrider.
+ PropagateOverrider(MD, Base, OffsetInLayoutClass, MD, Offsets);
+
+ // Add the overrider as the final overrider of itself.
+ OverriderInfo& Overrider = OverridersMap[std::make_pair(Base, MD)];
+ assert(!Overrider.Method && "Overrider should not exist yet!");
+
+ Overrider.Offset = OffsetInLayoutClass;
+ Overrider.Method = MD;
+ }
+}
+
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedRD,
+ const CXXBasePath &Path) {
+ int64_t NonVirtualOffset = 0;
+
+ unsigned NonVirtualStart = 0;
+ const CXXRecordDecl *VirtualBase = 0;
+
+ // First, look for the virtual base class.
+ for (unsigned I = 0, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ if (Element.Base->isVirtual()) {
+ // FIXME: Can we break when we find the first virtual base?
+ // (If we can't, can't we just iterate over the path in reverse order?)
+ NonVirtualStart = I + 1;
+ QualType VBaseType = Element.Base->getType();
+ VirtualBase =
+ cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
+ }
+ }
+
+ // Now compute the non-virtual offset.
+ for (unsigned I = NonVirtualStart, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ // Check the base class offset.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
+
+ const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
+ const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
+
+ NonVirtualOffset += Layout.getBaseClassOffset(Base);
+ }
+
+ // FIXME: This should probably use CharUnits or something. Maybe we should
+ // even change the base offsets in ASTRecordLayout to be specified in
+ // CharUnits.
+ return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset / 8);
+}
+
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *BaseRD,
+ const CXXRecordDecl *DerivedRD) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false,
+ /*RecordPaths=*/true, /*DetectVirtual=*/false);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ assert(false && "Class must be derived from the passed in base class!");
+ return BaseOffset();
+ }
+
+ return ComputeBaseOffset(Context, DerivedRD, Paths.front());
+}
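+
+// For illustration (hypothetical types, not from this file): given
+//
+//   struct A { };
+//   struct B : virtual A { };
+//   struct C : B { };
+//
+// ComputeBaseOffset(Context, A, C) yields a BaseOffset whose VirtualBase is
+// A and whose NonVirtualOffset is 0: the path from C to A crosses a virtual
+// base, and only offsets past the last virtual edge are accumulated.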
+
+static BaseOffset
+ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
+ const CXXMethodDecl *DerivedMD,
+ const CXXMethodDecl *BaseMD) {
+ const FunctionType *BaseFT = BaseMD->getType()->getAs<FunctionType>();
+ const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
+
+ // Canonicalize the return types.
+ CanQualType CanDerivedReturnType =
+ Context.getCanonicalType(DerivedFT->getResultType());
+ CanQualType CanBaseReturnType =
+ Context.getCanonicalType(BaseFT->getResultType());
+
+ assert(CanDerivedReturnType->getTypeClass() ==
+ CanBaseReturnType->getTypeClass() &&
+ "Types must have same type class!");
+
+ if (CanDerivedReturnType == CanBaseReturnType) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ if (isa<ReferenceType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<ReferenceType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<ReferenceType>()->getPointeeType();
+ } else if (isa<PointerType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<PointerType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<PointerType>()->getPointeeType();
+ } else {
+ assert(false && "Unexpected return type!");
+ }
+
+ // We need to compare unqualified types here; consider
+ // const T *Base::foo();
+ // T *Derived::foo();
+ if (CanDerivedReturnType.getUnqualifiedType() ==
+ CanBaseReturnType.getUnqualifiedType()) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ const CXXRecordDecl *DerivedRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
+
+ const CXXRecordDecl *BaseRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
+
+ return ComputeBaseOffset(Context, BaseRD, DerivedRD);
+}
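+
+// A sketch of when the result is non-empty (hypothetical types): with
+//
+//   struct A { virtual void f(); };
+//   struct B { virtual void g(); };
+//   struct C : A, B { };
+//
+//   struct X { virtual B *clone(); };
+//   struct Y : X { virtual C *clone(); };
+//
+// Y::clone() covariantly returns C*, but callers through X expect B*. The B
+// subobject sits at a non-zero offset inside C, so the entry for X::clone in
+// Y's vtable needs a return adjustment by exactly that base offset.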
+
+void FinalOverriders::PropagateOverrider(const CXXMethodDecl *OldMD,
+ BaseSubobject NewBase,
+ uint64_t OverriderOffsetInLayoutClass,
+ const CXXMethodDecl *NewMD,
+ SubobjectOffsetsMapTy &Offsets) {
+ for (CXXMethodDecl::method_iterator I = OldMD->begin_overridden_methods(),
+ E = OldMD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+ const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent();
+
+ // We want to override OverriddenMD in all subobjects, for example:
+ //
+ // struct A { virtual void f(); };
+ // struct B1 : A { };
+ // struct B2 : A { };
+ // struct C : B1, B2 { virtual void f(); };
+ //
+ // When overriding A::f with C::f we need to do so in both A subobjects.
+ const OffsetSetVectorTy &OffsetVector = Offsets[OverriddenRD];
+
+ // Go through all the subobjects.
+ for (unsigned I = 0, E = OffsetVector.size(); I != E; ++I) {
+ uint64_t Offset = OffsetVector[I];
+
+ BaseSubobject OverriddenSubobject = BaseSubobject(OverriddenRD, Offset);
+ BaseSubobjectMethodPairTy SubobjectAndMethod =
+ std::make_pair(OverriddenSubobject, OverriddenMD);
+
+ OverriderInfo &Overrider = OverridersMap[SubobjectAndMethod];
+
+ assert(Overrider.Method && "Did not find existing overrider!");
+
+ // Check if we need return adjustments or base adjustments.
+ // (We don't want to do this for pure virtual member functions).
+ if (!NewMD->isPure()) {
+ // Get the return adjustment base offset.
+ BaseOffset ReturnBaseOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, NewMD, OverriddenMD);
+
+ if (!ReturnBaseOffset.isEmpty()) {
+ // Store the return adjustment base offset.
+ ReturnAdjustments[SubobjectAndMethod] = ReturnBaseOffset;
+ }
+ }
+
+ // Set the new overrider.
+ Overrider.Offset = OverriderOffsetInLayoutClass;
+ Overrider.Method = NewMD;
+
+ // And propagate it further.
+ PropagateOverrider(OverriddenMD, NewBase, OverriderOffsetInLayoutClass,
+ NewMD, Offsets);
+ }
+ }
+}
+
+void
+FinalOverriders::MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets,
+ SubobjectOffsetsMapTy &Offsets) {
+ // Iterate over the new offsets.
+ for (SubobjectOffsetsMapTy::const_iterator I = NewOffsets.begin(),
+ E = NewOffsets.end(); I != E; ++I) {
+ const CXXRecordDecl *NewRD = I->first;
+ const OffsetSetVectorTy& NewOffsetVector = I->second;
+
+ OffsetSetVectorTy &OffsetVector = Offsets[NewRD];
+
+ // Merge the new offsets set vector into the old.
+ OffsetVector.insert(NewOffsetVector.begin(), NewOffsetVector.end());
+ }
+}
+
+void FinalOverriders::ComputeFinalOverriders(BaseSubobject Base,
+ bool BaseSubobjectIsVisitedVBase,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetsMapTy &Offsets) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ SubobjectOffsetsMapTy NewOffsets;
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have any virtual member functions.
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ bool IsVisitedVirtualBase = BaseSubobjectIsVisitedVBase;
+ uint64_t BaseOffset;
+ uint64_t BaseOffsetInLayoutClass;
+ if (I->isVirtual()) {
+ if (!VisitedVirtualBases.insert(BaseDecl))
+ IsVisitedVirtualBase = true;
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
+ BaseOffsetInLayoutClass = Layout.getBaseClassOffset(BaseDecl) +
+ OffsetInLayoutClass;
+ }
+
+ // Compute the final overriders for this base.
+ // We always want to compute the final overriders, even if the base is a
+ // visited virtual base. Consider:
+ //
+ // struct A {
+ // virtual void f();
+ // virtual void g();
+ // };
+ //
+ // struct B : virtual A {
+ // void f();
+ // };
+ //
+ // struct C : virtual A {
+ // void g ();
+ // };
+ //
+ // struct D : B, C { };
+ //
+ // Here, we still want to compute the overriders for A as a base of C,
+ // because otherwise we'll miss that C::g overrides A::g.
+ ComputeFinalOverriders(BaseSubobject(BaseDecl, BaseOffset),
+ IsVisitedVirtualBase, BaseOffsetInLayoutClass,
+ NewOffsets);
+ }
+
+ // Now add the overriders for this particular subobject.
+ // (We don't want to do this more than once for a virtual base.)
+ if (!BaseSubobjectIsVisitedVBase)
+ AddOverriders(Base, OffsetInLayoutClass, NewOffsets);
+
+ // And merge the newly discovered subobject offsets.
+ MergeSubobjectOffsets(NewOffsets, Offsets);
+
+ // Finally, add the offset for our own subobject.
+ Offsets[RD].insert(Base.getBaseOffset());
+}
+
+void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have any virtual member functions.
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ uint64_t BaseOffset;
+ if (I->isVirtual()) {
+ if (!VisitedVirtualBases.insert(BaseDecl)) {
+ // We've visited this base before.
+ continue;
+ }
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffset = Layout.getBaseClassOffset(BaseDecl) +
+ Base.getBaseOffset();
+ }
+
+ dump(Out, BaseSubobject(BaseDecl, BaseOffset));
+ }
+
+ Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
+ Out << Base.getBaseOffset() / 8 << ")\n";
+
+ // Now dump the overriders for this base subobject.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ OverriderInfo Overrider = getOverrider(Base, MD);
+
+ Out << " " << MD->getQualifiedNameAsString() << " - (";
+ Out << Overrider.Method->getQualifiedNameAsString();
+ Out << ", " << Overrider.Offset / 8 << ')';
+
+ AdjustmentOffsetsMapTy::const_iterator AI =
+ ReturnAdjustments.find(std::make_pair(Base, MD));
+ if (AI != ReturnAdjustments.end()) {
+ const BaseOffset &Offset = AI->second;
+
+ Out << " [ret-adj: ";
+ if (Offset.VirtualBase)
+ Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
+
+ Out << Offset.NonVirtualOffset << " nv]";
+ }
+
+ Out << "\n";
+ }
+}
+
+/// VTableComponent - Represents a single component in a vtable.
+class VTableComponent {
+public:
+ enum Kind {
+ CK_VCallOffset,
+ CK_VBaseOffset,
+ CK_OffsetToTop,
+ CK_RTTI,
+ CK_FunctionPointer,
+
+ /// CK_CompleteDtorPointer - A pointer to the complete destructor.
+ CK_CompleteDtorPointer,
+
+ /// CK_DeletingDtorPointer - A pointer to the deleting destructor.
+ CK_DeletingDtorPointer,
+
+ /// CK_UnusedFunctionPointer - In some cases, a vtable function pointer
+ /// will end up never being called. Such vtable function pointers are
+ /// represented as a CK_UnusedFunctionPointer.
+ CK_UnusedFunctionPointer
+ };
+
+ static VTableComponent MakeVCallOffset(int64_t Offset) {
+ return VTableComponent(CK_VCallOffset, Offset);
+ }
+
+ static VTableComponent MakeVBaseOffset(int64_t Offset) {
+ return VTableComponent(CK_VBaseOffset, Offset);
+ }
+
+ static VTableComponent MakeOffsetToTop(int64_t Offset) {
+ return VTableComponent(CK_OffsetToTop, Offset);
+ }
+
+ static VTableComponent MakeRTTI(const CXXRecordDecl *RD) {
+ return VTableComponent(CK_RTTI, reinterpret_cast<uintptr_t>(RD));
+ }
+
+ static VTableComponent MakeFunction(const CXXMethodDecl *MD) {
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Don't use MakeFunction with destructors!");
+
+ return VTableComponent(CK_FunctionPointer,
+ reinterpret_cast<uintptr_t>(MD));
+ }
+
+ static VTableComponent MakeCompleteDtor(const CXXDestructorDecl *DD) {
+ return VTableComponent(CK_CompleteDtorPointer,
+ reinterpret_cast<uintptr_t>(DD));
+ }
+
+ static VTableComponent MakeDeletingDtor(const CXXDestructorDecl *DD) {
+ return VTableComponent(CK_DeletingDtorPointer,
+ reinterpret_cast<uintptr_t>(DD));
+ }
+
+ static VTableComponent MakeUnusedFunction(const CXXMethodDecl *MD) {
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Don't use MakeUnusedFunction with destructors!");
+ return VTableComponent(CK_UnusedFunctionPointer,
+ reinterpret_cast<uintptr_t>(MD));
+ }
+
+ static VTableComponent getFromOpaqueInteger(uint64_t I) {
+ return VTableComponent(I);
+ }
+
+ /// getKind - Get the kind of this vtable component.
+ Kind getKind() const {
+ return (Kind)(Value & 0x7);
+ }
+
+ int64_t getVCallOffset() const {
+ assert(getKind() == CK_VCallOffset && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ int64_t getVBaseOffset() const {
+ assert(getKind() == CK_VBaseOffset && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ int64_t getOffsetToTop() const {
+ assert(getKind() == CK_OffsetToTop && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ const CXXRecordDecl *getRTTIDecl() const {
+ assert(getKind() == CK_RTTI && "Invalid component kind!");
+
+ return reinterpret_cast<CXXRecordDecl *>(getPointer());
+ }
+
+ const CXXMethodDecl *getFunctionDecl() const {
+ assert(getKind() == CK_FunctionPointer);
+
+ return reinterpret_cast<CXXMethodDecl *>(getPointer());
+ }
+
+ const CXXDestructorDecl *getDestructorDecl() const {
+ assert((getKind() == CK_CompleteDtorPointer ||
+ getKind() == CK_DeletingDtorPointer) && "Invalid component kind!");
+
+ return reinterpret_cast<CXXDestructorDecl *>(getPointer());
+ }
+
+ const CXXMethodDecl *getUnusedFunctionDecl() const {
+ assert(getKind() == CK_UnusedFunctionPointer);
+
+ return reinterpret_cast<CXXMethodDecl *>(getPointer());
+ }
+
+private:
+ VTableComponent(Kind ComponentKind, int64_t Offset) {
+ assert((ComponentKind == CK_VCallOffset ||
+ ComponentKind == CK_VBaseOffset ||
+ ComponentKind == CK_OffsetToTop) && "Invalid component kind!");
+ assert(Offset <= ((1LL << 56) - 1) && "Offset is too big!");
+
+ Value = ((Offset << 3) | ComponentKind);
+ }
+
+ VTableComponent(Kind ComponentKind, uintptr_t Ptr) {
+ assert((ComponentKind == CK_RTTI ||
+ ComponentKind == CK_FunctionPointer ||
+ ComponentKind == CK_CompleteDtorPointer ||
+ ComponentKind == CK_DeletingDtorPointer ||
+ ComponentKind == CK_UnusedFunctionPointer) &&
+ "Invalid component kind!");
+
+ assert((Ptr & 7) == 0 && "Pointer not sufficiently aligned!");
+
+ Value = Ptr | ComponentKind;
+ }
+
+ int64_t getOffset() const {
+ assert((getKind() == CK_VCallOffset || getKind() == CK_VBaseOffset ||
+ getKind() == CK_OffsetToTop) && "Invalid component kind!");
+
+ return Value >> 3;
+ }
+
+ uintptr_t getPointer() const {
+ assert((getKind() == CK_RTTI ||
+ getKind() == CK_FunctionPointer ||
+ getKind() == CK_CompleteDtorPointer ||
+ getKind() == CK_DeletingDtorPointer ||
+ getKind() == CK_UnusedFunctionPointer) &&
+ "Invalid component kind!");
+
+ return static_cast<uintptr_t>(Value & ~7ULL);
+ }
+
+ explicit VTableComponent(uint64_t Value)
+ : Value(Value) { }
+
+ /// The kind is stored in the lower 3 bits of the value. For offsets, we
+ /// make use of the fact that classes can't be larger than 2^55 bytes,
+ /// so we store the offset in the lower part of the 61 bits that remain.
+ /// (The reason we're not simply using a PointerIntPair here is that we
+ /// need the offsets to be 64-bit, even on a 32-bit machine.)
+ int64_t Value;
+};
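+
+// A minimal sketch of the encoding above (the values are illustrative): a
+// vcall offset of -16 is stored as (-16 << 3) | CK_VCallOffset, so
+//
+//   VTableComponent VC = VTableComponent::MakeVCallOffset(-16);
+//   assert(VC.getKind() == VTableComponent::CK_VCallOffset);
+//   assert(VC.getVCallOffset() == -16);
+//
+// holds: getKind() masks out the low 3 bits, and getOffset() undoes the
+// shift with an arithmetic right shift.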
+
+/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
+struct VCallOffsetMap {
+
+ typedef std::pair<const CXXMethodDecl *, int64_t> MethodAndOffsetPairTy;
+
+ /// Offsets - Keeps track of methods and their offsets.
+ // FIXME: This should be a real map and not a vector.
+ llvm::SmallVector<MethodAndOffsetPairTy, 16> Offsets;
+
+ /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
+ /// can share the same vcall offset.
+ static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS);
+
+public:
+ /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
+ /// add was successful, or false if there was already a member function with
+ /// the same signature in the map.
+ bool AddVCallOffset(const CXXMethodDecl *MD, int64_t OffsetOffset);
+
+ /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
+ /// vtable address point) for the given virtual member function.
+ int64_t getVCallOffsetOffset(const CXXMethodDecl *MD);
+
+ /// empty - Returns whether the offset map is empty.
+ bool empty() const { return Offsets.empty(); }
+};
+
+static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ ASTContext &C = LHS->getASTContext(); // TODO: thread this down
+ CanQual<FunctionProtoType>
+ LT = C.getCanonicalType(LHS->getType()).getAs<FunctionProtoType>(),
+ RT = C.getCanonicalType(RHS->getType()).getAs<FunctionProtoType>();
+
+ // Fast-path matches in the canonical types.
+ if (LT == RT) return true;
+
+ // Force the signatures to match. We can't rely on the overrides
+ // list here because there isn't necessarily an inheritance
+ // relationship between the two methods.
+ if (LT.getQualifiers() != RT.getQualifiers() ||
+ LT->getNumArgs() != RT->getNumArgs())
+ return false;
+ for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
+ if (LT->getArgType(I) != RT->getArgType(I))
+ return false;
+ return true;
+}
+
+bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ assert(LHS->isVirtual() && "LHS must be virtual!");
+ assert(RHS->isVirtual() && "RHS must be virtual!");
+
+ // A destructor can share a vcall offset with another destructor.
+ if (isa<CXXDestructorDecl>(LHS))
+ return isa<CXXDestructorDecl>(RHS);
+
+ // FIXME: We need to check more things here.
+
+ // The methods must have the same name.
+ DeclarationName LHSName = LHS->getDeclName();
+ DeclarationName RHSName = RHS->getDeclName();
+ if (LHSName != RHSName)
+ return false;
+
+ // And the same signatures.
+ return HasSameVirtualSignature(LHS, RHS);
+}
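+
+// For example (hypothetical class, applying the rules above):
+//
+//   struct A { virtual ~A(); virtual void f(); virtual void f(int); };
+//
+// ~A can share a vcall offset only with other destructors, and the two 'f'
+// overloads differ in signature, so AddVCallOffset would allocate three
+// distinct vcall offsets here.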
+
+bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
+ int64_t OffsetOffset) {
+ // Check if we can reuse an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return false;
+ }
+
+ // Add the offset.
+ Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
+ return true;
+}
+
+int64_t VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
+ // Look for an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return Offsets[I].second;
+ }
+
+ assert(false && "Should always find a vcall offset offset!");
+ return 0;
+}
+
+/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
+class VCallAndVBaseOffsetBuilder {
+public:
+ typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ VBaseOffsetOffsetsMapTy;
+
+private:
+ /// MostDerivedClass - The most derived class for which we're building vcall
+ /// and vbase offsets.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different from the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// Components - The vcall and vbase offset components.
+ typedef llvm::SmallVector<VTableComponent, 64> VTableComponentVectorTy;
+ VTableComponentVectorTy Components;
+
+ /// VisitedVirtualBases - Visited virtual bases.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// VCallOffsets - Keeps track of vcall offsets.
+ VCallOffsetMap VCallOffsets;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
+ /// relative to the address point.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ /// (Can be null when we're not building a vtable of the most derived class).
+ const FinalOverriders *Overriders;
+
+ /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
+ /// given base subobject.
+ void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
+ uint64_t RealBaseOffset);
+
+ /// AddVCallOffsets - Add vcall offsets for the given base subobject.
+ void AddVCallOffsets(BaseSubobject Base, uint64_t VBaseOffset);
+
+ /// AddVBaseOffsets - Add vbase offsets for the given class.
+ void AddVBaseOffsets(const CXXRecordDecl *Base, uint64_t OffsetInLayoutClass);
+
+ /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
+ /// bytes, relative to the vtable address point.
+ int64_t getCurrentOffsetOffset() const;
+
+public:
+ VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+ const CXXRecordDecl *LayoutClass,
+ const FinalOverriders *Overriders,
+ BaseSubobject Base, bool BaseIsVirtual,
+ uint64_t OffsetInLayoutClass)
+ : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+
+ // Add vcall and vbase offsets.
+ AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
+ }
+
+ /// Methods for iterating over the components.
+ typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
+ const_iterator components_begin() const { return Components.rbegin(); }
+ const_iterator components_end() const { return Components.rend(); }
+
+ const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+};
+
+void
+VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
+ bool BaseIsVirtual,
+ uint64_t RealBaseOffset) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
+
+ // Itanium C++ ABI 2.5.2:
+ // ..in classes sharing a virtual table with a primary base class, the vcall
+ // and vbase offsets added by the derived class all come before the vcall
+ // and vbase offsets required by the base class, so that the latter may be
+ // laid out as required by the base class without regard to additions from
+ // the derived class(es).
+
+ // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
+ // emit them for the primary base first).
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ bool PrimaryBaseIsVirtual = Layout.getPrimaryBaseWasVirtual();
+
+ uint64_t PrimaryBaseOffset;
+
+ // Get the base offset of the primary base.
+ if (PrimaryBaseIsVirtual) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ }
+
+ AddVCallAndVBaseOffsets(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ PrimaryBaseIsVirtual, RealBaseOffset);
+ }
+
+ AddVBaseOffsets(Base.getBase(), RealBaseOffset);
+
+ // We only want to add vcall offsets for virtual bases.
+ if (BaseIsVirtual)
+ AddVCallOffsets(Base, RealBaseOffset);
+}
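+
+// Note on the ordering above: the primary base's offsets are pushed first,
+// and Components is read back through reverse iterators (components_begin()
+// is rbegin()), so those offsets end up closest to the address point --
+// exactly the placement the ABI wording quoted above requires so that the
+// base class vtable layout can be reused unchanged.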
+
+int64_t VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
+ // OffsetIndex is the index of this vcall or vbase offset, relative to the
+ // vtable address point. (We subtract 3 to account for the entries just
+ // above the address point: the RTTI info, the offset to top, and the slot
+ // for the vcall or vbase offset itself.)
+ int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+
+ // FIXME: We shouldn't use / 8 here.
+ int64_t OffsetOffset = OffsetIndex *
+ (int64_t)Context.Target.getPointerWidth(0) / 8;
+
+ return OffsetOffset;
+}
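+
+// A worked example of the arithmetic above, assuming an 8-byte pointer
+// width: with no components emitted yet, OffsetIndex = -(3 + 0) = -3, so
+// the first vcall or vbase offset lands 24 bytes below the address point;
+// the next one gets OffsetIndex = -4, i.e. -32 bytes, and so on, growing
+// away from the address point.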
+
+void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
+ uint64_t VBaseOffset) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // Handle the primary base first.
+ // We only want to add vcall offsets if the base is non-virtual; a virtual
+ // primary base will have its vcall and vbase offsets emitted already.
+ if (PrimaryBase && !Layout.getPrimaryBaseWasVirtual()) {
+ // Get the base offset of the primary base.
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
+ VBaseOffset);
+ }
+
+ // Add the vcall offsets.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ int64_t OffsetOffset = getCurrentOffsetOffset();
+
+ // Don't add a vcall offset if we already have one for this member function
+ // signature.
+ if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
+ continue;
+
+ int64_t Offset = 0;
+
+ if (Overriders) {
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders->getOverrider(Base, MD);
+
+ // The vcall offset is the offset from the virtual base to the object
+ // where the function was overridden.
+ // FIXME: We should not use / 8 here.
+ Offset = (int64_t)(Overrider.Offset - VBaseOffset) / 8;
+ }
+
+ Components.push_back(VTableComponent::MakeVCallOffset(Offset));
+ }
+
+ // And iterate over all non-virtual bases (ignoring the primary base).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl == PrimaryBase)
+ continue;
+
+ // Get the base offset of this base.
+ uint64_t BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset), VBaseOffset);
+ }
+}
+
+void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
+ uint64_t OffsetInLayoutClass) {
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Add vbase offsets.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base that we haven't visited before.
+ if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+ // FIXME: We shouldn't use / 8 here.
+ int64_t Offset =
+ (int64_t)(LayoutClassLayout.getVBaseClassOffset(BaseDecl) -
+ OffsetInLayoutClass) / 8;
+
+ // Add the vbase offset offset.
+ assert(!VBaseOffsetOffsets.count(BaseDecl) &&
+ "vbase offset offset already exists!");
+
+ int64_t VBaseOffsetOffset = getCurrentOffsetOffset();
+ VBaseOffsetOffsets.insert(std::make_pair(BaseDecl, VBaseOffsetOffset));
+
+ Components.push_back(VTableComponent::MakeVBaseOffset(Offset));
+ }
+
+ // Check the base class looking for more vbase offsets.
+ AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
+ }
+}
+
+/// VTableBuilder - Class for building vtable layout information.
+class VTableBuilder {
+public:
+ /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
+ /// primary bases.
+ typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
+ PrimaryBasesSetVectorTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ VBaseOffsetOffsetsMapTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t>
+ AddressPointsMapTy;
+
+private:
+ /// VTables - Global vtable information.
+ CodeGenVTables &VTables;
+
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// vtable.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building a construction vtable, this
+ /// holds the offset from the layout class to the most derived class.
+ const uint64_t MostDerivedClassOffset;
+
+ /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
+ /// base. (This only makes sense when building a construction vtable).
+ bool MostDerivedClassIsVirtual;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different from the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ const FinalOverriders Overriders;
+
+ /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
+ /// bases in this vtable.
+ llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
+ /// the most derived class.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// Components - The components of the vtable being built.
+ llvm::SmallVector<VTableComponent, 64> Components;
+
+ /// AddressPoints - Address points for the vtable being built.
+ AddressPointsMapTy AddressPoints;
+
+ /// MethodInfo - Contains information about a method in a vtable.
+ /// (Used for computing 'this' pointer adjustment thunks.)
+ struct MethodInfo {
+ /// BaseOffset - The base offset of this method.
+ const uint64_t BaseOffset;
+
+ /// BaseOffsetInLayoutClass - The base offset in the layout class of this
+ /// method.
+ const uint64_t BaseOffsetInLayoutClass;
+
+ /// VTableIndex - The index in the vtable that this method has.
+ /// (For destructors, this is the index of the complete destructor).
+ const uint64_t VTableIndex;
+
+ MethodInfo(uint64_t BaseOffset, uint64_t BaseOffsetInLayoutClass,
+ uint64_t VTableIndex)
+ : BaseOffset(BaseOffset),
+ BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
+ VTableIndex(VTableIndex) { }
+
+ MethodInfo() : BaseOffset(0), BaseOffsetInLayoutClass(0), VTableIndex(0) { }
+ };
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
+
+ /// MethodInfoMap - The information for all methods in the vtable we're
+ /// currently building.
+ MethodInfoMapTy MethodInfoMap;
+
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;
+
+ /// VTableThunks - The thunks by vtable index in the vtable currently being
+ /// built.
+ VTableThunksMapTy VTableThunks;
+
+ typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - A map that contains all the thunks needed for all methods in the
+ /// most derived class for which the vtable is currently being built.
+ ThunksMapTy Thunks;
+
+ /// AddThunk - Add a thunk for the given method.
+ void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk);
+
+ /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
+ /// part of the vtable we're currently building.
+ void ComputeThisAdjustments();
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// PrimaryVirtualBases - All known virtual bases that are a primary base of
+ /// some other base.
+ VisitedVirtualBasesSetTy PrimaryVirtualBases;
+
+ /// ComputeReturnAdjustment - Compute the return adjustment given a return
+ /// adjustment base offset.
+ ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset);
+
+ /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
+ /// the 'this' pointer from the base subobject to the derived subobject.
+ BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const;
+
+ /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
+ /// given virtual member function, its offset in the layout class and its
+ /// final overrider.
+ ThisAdjustment
+ ComputeThisAdjustment(const CXXMethodDecl *MD,
+ uint64_t BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider);
+
+ /// AddMethod - Add a single virtual member function to the vtable
+ /// components vector.
+ void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);
+
+ /// IsOverriderUsed - Returns whether the overrider will ever be used in this
+ /// part of the vtable.
+ ///
+ /// Itanium C++ ABI 2.5.2:
+ ///
+ /// struct A { virtual void f(); };
+ /// struct B : virtual public A { int i; };
+ /// struct C : virtual public A { int j; };
+ /// struct D : public B, public C {};
+ ///
+ /// When B and C are declared, A is a primary base in each case, so although
+ /// vcall offsets are allocated in the A-in-B and A-in-C vtables, no this
+ /// adjustment is required and no thunk is generated. However, inside D
+ /// objects, A is no longer a primary base of C, so if we allowed calls to
+ /// C::f() to use the copy of A's vtable in the C subobject, we would need
+ /// to adjust this from C* to B::A*, which would require a third-party
+ /// thunk. Since we require that a call to C::f() first convert to A*,
+ /// C-in-D's copy of A's vtable is never referenced, so this is not
+ /// necessary.
+ bool IsOverriderUsed(const CXXMethodDecl *Overrider,
+ uint64_t BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ uint64_t FirstBaseOffsetInLayoutClass) const;
+
+ /// AddMethods - Add the methods of this base subobject and all its
+ /// primary bases to the vtable components vector.
+ void AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ uint64_t FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases);
+
+ /// LayoutVTable - Layout the vtable for the given base class, including its
+ /// secondary vtables and any vtables for virtual bases.
+ void LayoutVTable();
+
+ /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the
+ /// given base subobject, as well as all its secondary vtables.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual
+ /// in the layout class.
+ void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ uint64_t OffsetInLayoutClass);
+
+ /// LayoutSecondaryVTables - Layout the secondary vtables for the given base
+ /// subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ uint64_t OffsetInLayoutClass);
+
+ /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
+ /// class hierarchy.
+ void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ uint64_t OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
+ /// given base (excluding any primary bases).
+ void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// isBuildingConstructorVTable - Return whether this vtable builder is
+ /// building a construction vtable.
+ bool isBuildingConstructorVTable() const {
+ return MostDerivedClass != LayoutClass;
+ }
+
+public:
+ VTableBuilder(CodeGenVTables &VTables, const CXXRecordDecl *MostDerivedClass,
+ uint64_t MostDerivedClassOffset, bool MostDerivedClassIsVirtual,
+ const CXXRecordDecl *LayoutClass)
+ : VTables(VTables), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset),
+ MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
+ LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
+ Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
+
+ LayoutVTable();
+ }
+
+ ThunksMapTy::const_iterator thunks_begin() const {
+ return Thunks.begin();
+ }
+
+ ThunksMapTy::const_iterator thunks_end() const {
+ return Thunks.end();
+ }
+
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+
+ /// getNumVTableComponents - Return the number of components in the vtable
+ /// currently being built.
+ uint64_t getNumVTableComponents() const {
+ return Components.size();
+ }
+
+ const uint64_t *vtable_components_data_begin() const {
+ return reinterpret_cast<const uint64_t *>(Components.begin());
+ }
+
+ const uint64_t *vtable_components_data_end() const {
+ return reinterpret_cast<const uint64_t *>(Components.end());
+ }
+
+ AddressPointsMapTy::const_iterator address_points_begin() const {
+ return AddressPoints.begin();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_end() const {
+ return AddressPoints.end();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
+ return VTableThunks.begin();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_end() const {
+ return VTableThunks.end();
+ }
+
+ /// dumpLayout - Dump the vtable layout.
+ void dumpLayout(llvm::raw_ostream&);
+};
+
+void VTableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
+ assert(!isBuildingConstructorVTable() &&
+ "Can't add thunks for construction vtable");
+
+ llvm::SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
+
+ // Check if we have this thunk already.
+ if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
+ ThunksVector.end())
+ return;
+
+ ThunksVector.push_back(Thunk);
+}
+
+typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
+
+/// ComputeAllOverriddenMethods - Given a method decl, compute the set of all
+/// the methods it overrides, directly or indirectly.
+static void
+ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
+ OverriddenMethodsSetTy& OverriddenMethods) {
+ assert(MD->isVirtual() && "Method is not virtual!");
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ OverriddenMethods.insert(OverriddenMD);
+
+ ComputeAllOverriddenMethods(OverriddenMD, OverriddenMethods);
+ }
+}
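+
+// For example (hypothetical hierarchy): given
+//
+//   struct A { virtual void f(); };
+//   struct B : A { virtual void f(); };
+//   struct C : B { virtual void f(); };
+//
+// ComputeAllOverriddenMethods(C::f, S) leaves S == { B::f, A::f }, since
+// the recursion walks the overridden-methods chain all the way up.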
+
+void VTableBuilder::ComputeThisAdjustments() {
+ // Now go through the method info map and see if any of the methods need
+ // 'this' pointer adjustments.
+ for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
+ E = MethodInfoMap.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const MethodInfo &MethodInfo = I->second;
+
+ // Ignore adjustments for unused function pointers.
+ uint64_t VTableIndex = MethodInfo.VTableIndex;
+ if (Components[VTableIndex].getKind() ==
+ VTableComponent::CK_UnusedFunctionPointer)
+ continue;
+
+ // Get the final overrider for this method.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(BaseSubobject(MD->getParent(),
+ MethodInfo.BaseOffset), MD);
+
+ // Check if we need an adjustment at all.
+ if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
+ // When a return thunk is needed by a derived class that overrides a
+ // virtual base, gcc uses a virtual 'this' adjustment as well.
+ // While the thunk itself might be needed by vtables in subclasses or
+ // in construction vtables, there doesn't seem to be a reason for using
+ // the thunk in this vtable. Still, we do so to match gcc.
+ if (VTableThunks.lookup(VTableIndex).Return.isEmpty())
+ continue;
+ }
+
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
+
+ if (ThisAdjustment.isEmpty())
+ continue;
+
+ // Add it.
+ VTableThunks[VTableIndex].This = ThisAdjustment;
+
+ if (isa<CXXDestructorDecl>(MD)) {
+ // Add an adjustment for the deleting destructor as well.
+ VTableThunks[VTableIndex + 1].This = ThisAdjustment;
+ }
+ }
+
+ // Clear the method info map.
+ MethodInfoMap.clear();
+
+ if (isBuildingConstructorVTable()) {
+ // We don't need to store thunk information for construction vtables.
+ return;
+ }
+
+ for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(),
+ E = VTableThunks.end(); I != E; ++I) {
+ const VTableComponent &Component = Components[I->first];
+ const ThunkInfo &Thunk = I->second;
+ const CXXMethodDecl *MD;
+
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind!");
+ case VTableComponent::CK_FunctionPointer:
+ MD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ MD = Component.getDestructorDecl();
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ // We've already added the thunk when we saw the complete dtor pointer.
+ continue;
+ }
+
+ if (MD->getParent() == MostDerivedClass)
+ AddThunk(MD, Thunk);
+ }
+}
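+
+// Note on the destructor case above: AddMethod (below) emits a destructor
+// as two consecutive slots, the complete destructor at some index i and the
+// deleting destructor at i + 1, which is why the same 'this' adjustment is
+// stored at both VTableIndex and VTableIndex + 1.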
+
+ReturnAdjustment VTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
+ ReturnAdjustment Adjustment;
+
+ if (!Offset.isEmpty()) {
+ if (Offset.VirtualBase) {
+ // Get the virtual base offset offset.
+ if (Offset.DerivedClass == MostDerivedClass) {
+ // We can get the offset offset directly from our map.
+ Adjustment.VBaseOffsetOffset =
+ VBaseOffsetOffsets.lookup(Offset.VirtualBase);
+ } else {
+ Adjustment.VBaseOffsetOffset =
+ VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
+ Offset.VirtualBase);
+ }
+ }
+
+ Adjustment.NonVirtual = Offset.NonVirtualOffset;
+ }
+
+ return Adjustment;
+}
+
+BaseOffset
+VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const {
+ const CXXRecordDecl *BaseRD = Base.getBase();
+ const CXXRecordDecl *DerivedRD = Derived.getBase();
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true,
+ /*RecordPaths=*/true, /*DetectVirtual=*/true);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ assert(false && "Class must be derived from the passed in base class!");
+ return BaseOffset();
+ }
+
+ // We have to go through all the paths, and see which one leads us to the
+ // right base subobject.
+ for (CXXBasePaths::const_paths_iterator I = Paths.begin(), E = Paths.end();
+ I != E; ++I) {
+ BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I);
+
+ // FIXME: Should not use * 8 here.
+ uint64_t OffsetToBaseSubobject = Offset.NonVirtualOffset * 8;
+
+ if (Offset.VirtualBase) {
+ // If we have a virtual base class, the non-virtual offset is relative
+ // to the virtual base class offset.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Get the virtual base offset, relative to the layout class.
+ OffsetToBaseSubobject +=
+ LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ } else {
+ // Otherwise, the non-virtual offset is relative to the derived class
+ // offset.
+ OffsetToBaseSubobject += Derived.getBaseOffset();
+ }
+
+ // Check if this path gives us the right base subobject.
+ if (OffsetToBaseSubobject == Base.getBaseOffset()) {
+ // Since we're going from the base class _to_ the derived class, we'll
+ // invert the non-virtual offset here.
+ Offset.NonVirtualOffset = -Offset.NonVirtualOffset;
+ return Offset;
+ }
+ }
+
+ return BaseOffset();
+}
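+
+// A sketch of a case where this returns a non-empty offset (hypothetical
+// types):
+//
+//   struct A { virtual void f(); };
+//   struct B { virtual void f(); };
+//   struct C : A, B { virtual void f(); };
+//
+// C::f is the final overrider of B::f, but the B subobject lives at a
+// non-zero offset inside C, so the thunk stored in the B-in-C vtable must
+// adjust 'this' from B* back to C* by the negated non-virtual offset
+// computed above.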
+
+ThisAdjustment
+VTableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
+ uint64_t BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider) {
+ // Ignore adjustments for pure virtual member functions.
+ if (Overrider.Method->isPure())
+ return ThisAdjustment();
+
+ BaseSubobject OverriddenBaseSubobject(MD->getParent(),
+ BaseOffsetInLayoutClass);
+
+ BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
+ Overrider.Offset);
+
+ // Compute the adjustment offset.
+ BaseOffset Offset = ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
+ OverriderBaseSubobject);
+ if (Offset.isEmpty())
+ return ThisAdjustment();
+
+ ThisAdjustment Adjustment;
+
+ if (Offset.VirtualBase) {
+ // Get the vcall offset map for this virtual base.
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
+
+ if (VCallOffsets.empty()) {
+ // We don't have vcall offsets for this virtual base, go ahead and
+ // build them.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
+ /*FinalOverriders=*/0,
+ BaseSubobject(Offset.VirtualBase, 0),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/0);
+
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ Adjustment.VCallOffsetOffset = VCallOffsets.getVCallOffsetOffset(MD);
+ }
+
+ // Set the non-virtual part of the adjustment.
+ Adjustment.NonVirtual = Offset.NonVirtualOffset;
+
+ return Adjustment;
+}
+
+void
+VTableBuilder::AddMethod(const CXXMethodDecl *MD,
+ ReturnAdjustment ReturnAdjustment) {
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ assert(ReturnAdjustment.isEmpty() &&
+ "Destructor can't have return adjustment!");
+
+ // Add both the complete destructor and the deleting destructor.
+ Components.push_back(VTableComponent::MakeCompleteDtor(DD));
+ Components.push_back(VTableComponent::MakeDeletingDtor(DD));
+ } else {
+ // Add the return adjustment if necessary.
+ if (!ReturnAdjustment.isEmpty())
+ VTableThunks[Components.size()].Return = ReturnAdjustment;
+
+ // Add the function.
+ Components.push_back(VTableComponent::MakeFunction(MD));
+ }
+}
+
+/// OverridesIndirectMethodInBases - Return whether the given member function
+/// overrides any methods in the set of given bases.
+/// This checks "overriders of overriders", not just direct overrides.
+/// For example, if we have:
+///
+/// struct A { virtual void f(); };
+/// struct B : A { virtual void f(); };
+/// struct C : B { virtual void f(); };
+///
+/// OverridesIndirectMethodInBases will return true if given C::f as the
+/// method and { A } as the set of bases.
+static bool
+OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ if (Bases.count(MD->getParent()))
+ return true;
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // Check "indirect overriders".
+ if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
+ return true;
+ }
+
+ return false;
+}
+
+bool
+VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
+ uint64_t BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ uint64_t FirstBaseOffsetInLayoutClass) const {
+ // If the base and the first base in the primary base chain have the same
+ // offsets, then this overrider will be used.
+ if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass)
+ return true;
+
+ // We know now that Base (or a direct or indirect base of it) is a primary
+ // base in part of the class hierarchy, but not a primary base in the most
+ // derived class.
+
+ // If the overrider is the first base in the primary base chain, we know
+ // that the overrider will be used.
+ if (Overrider->getParent() == FirstBaseInPrimaryBaseChain)
+ return true;
+
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+
+ const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain;
+ PrimaryBases.insert(RD);
+
+ // Now traverse the base chain, starting with the first base, until we find
+ // the base that is no longer a primary base.
+ while (true) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Now check whether this virtual primary base is the one that is no
+ // longer a primary base in the most derived class.
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ FirstBaseOffsetInLayoutClass) {
+ // We found it, stop walking the chain.
+ break;
+ }
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+ }
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ assert(false && "Found a duplicate primary base!");
+
+ RD = PrimaryBase;
+ }
+
+ // If the final overrider is an override of one of the primary bases,
+ // then we know that it will be used.
+ return OverridesIndirectMethodInBases(Overrider, PrimaryBases);
+}
+
+/// FindNearestOverriddenMethod - Given a method, returns the overridden method
+/// from the nearest base. Returns null if no method was found.
+static const CXXMethodDecl *
+FindNearestOverriddenMethod(const CXXMethodDecl *MD,
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ OverriddenMethodsSetTy OverriddenMethods;
+ ComputeAllOverriddenMethods(MD, OverriddenMethods);
+
+ for (int I = Bases.size(), E = 0; I != E; --I) {
+ const CXXRecordDecl *PrimaryBase = Bases[I - 1];
+
+ // Now check the overridden methods.
+ for (OverriddenMethodsSetTy::const_iterator I = OverriddenMethods.begin(),
+ E = OverriddenMethods.end(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // We found our overridden method.
+ if (OverriddenMD->getParent() == PrimaryBase)
+ return OverriddenMD;
+ }
+ }
+
+ return 0;
+}
+
+void
+VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ uint64_t FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ uint64_t PrimaryBaseOffset;
+ uint64_t PrimaryBaseOffsetInLayoutClass;
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ PrimaryBaseOffsetInLayoutClass = BaseOffsetInLayoutClass;
+ }
+
+ AddMethods(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ PrimaryBaseOffsetInLayoutClass, FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass, PrimaryBases);
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ assert(false && "Found a duplicate primary base!");
+ }
+
+ // Now go through all virtual member functions and add them.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(Base, MD);
+
+ // Check if this virtual member function overrides a method in a primary
+ // base. If so, and the return type doesn't require adjustment, then we
+ // can just use the member function from the primary base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+ if (ComputeReturnAdjustmentBaseOffset(Context, MD,
+ OverriddenMD).isEmpty()) {
+ // Replace the method info of the overridden method with our own
+ // method.
+ assert(MethodInfoMap.count(OverriddenMD) &&
+ "Did not find the overridden method!");
+ MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
+
+ MethodInfo MethodInfo(Base.getBaseOffset(),
+ BaseOffsetInLayoutClass,
+ OverriddenMethodInfo.VTableIndex);
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+ MethodInfoMap.erase(OverriddenMD);
+
+ // If the overridden method exists in a virtual base class or a direct
+ // or indirect base class of a virtual base class, we need to emit a
+ // thunk if we ever have a class hierarchy where the base class is not
+ // a primary base in the complete object.
+ if (!isBuildingConstructorVTable() && OverriddenMD != MD) {
+ // Compute the this adjustment.
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
+ Overrider);
+
+ if (ThisAdjustment.VCallOffsetOffset &&
+ Overrider.Method->getParent() == MostDerivedClass) {
+ // This is a virtual thunk for the most derived class, add it.
+ AddThunk(Overrider.Method,
+ ThunkInfo(ThisAdjustment, ReturnAdjustment()));
+ }
+ }
+
+ continue;
+ }
+ }
+
+ // Insert the method info for this method.
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ Components.size());
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+
+ // Check if this overrider is going to be used.
+ const CXXMethodDecl *OverriderMD = Overrider.Method;
+ if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass,
+ FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass)) {
+ Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD));
+ continue;
+ }
+
+ // Check if this overrider needs a return adjustment.
+ BaseOffset ReturnAdjustmentOffset =
+ Overriders.getReturnAdjustmentOffset(Base, MD);
+
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
+ AddMethod(Overrider.Method, ReturnAdjustment);
+ }
+}
+
+void VTableBuilder::LayoutVTable() {
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass, 0),
+ /*BaseIsMorallyVirtual=*/false,
+ MostDerivedClassIsVirtual,
+ MostDerivedClassOffset);
+
+ VisitedVirtualBasesSetTy VBases;
+
+ // Determine the primary virtual bases.
+ DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
+ VBases);
+ VBases.clear();
+
+ LayoutVTablesForVirtualBases(MostDerivedClass, VBases);
+}
+
+void
+VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ uint64_t OffsetInLayoutClass) {
+ assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
+
+ // Add vcall and vbase offsets for this vtable.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
+ Base, BaseIsVirtualInLayoutClass,
+ OffsetInLayoutClass);
+ Components.append(Builder.components_begin(), Builder.components_end());
+
+ // Check if we need to add these vcall offsets.
+ if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) {
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
+
+ if (VCallOffsets.empty())
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ // If we're laying out the most derived class we want to keep track of the
+ // virtual base class offset offsets.
+ if (Base.getBase() == MostDerivedClass)
+ VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
+
+ // Add the offset to top.
+ // FIXME: We should not use / 8 here.
+ int64_t OffsetToTop = -(int64_t)(OffsetInLayoutClass -
+ MostDerivedClassOffset) / 8;
+ Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop));
+
+ // Next, add the RTTI.
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
+
+ uint64_t AddressPoint = Components.size();
+
+ // Now go through all virtual member functions and add them.
+ PrimaryBasesSetVectorTy PrimaryBases;
+ AddMethods(Base, OffsetInLayoutClass, Base.getBase(), OffsetInLayoutClass,
+ PrimaryBases);
+
+ // Compute 'this' pointer adjustments.
+ ComputeThisAdjustments();
+
+ // Add all address points.
+ const CXXRecordDecl *RD = Base.getBase();
+ while (true) {
+ AddressPoints.insert(std::make_pair(BaseSubobject(RD, OffsetInLayoutClass),
+ AddressPoint));
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ // Check if this virtual primary base is a primary base in the layout
+ // class. If it's not, we don't want to add it.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ OffsetInLayoutClass) {
+ // We don't want to add this class (or any of its primary bases).
+ break;
+ }
+ }
+
+ RD = PrimaryBase;
+ }
+
+ // Layout secondary vtables.
+ LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
+}
+
+void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ uint64_t OffsetInLayoutClass) {
+ // Itanium C++ ABI 2.5.2:
+ // Following the primary virtual table of a derived class are secondary
+ // virtual tables for each of its proper base classes, except any primary
+ // base(s) with which it shares its primary virtual table.
+
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+    // Ignore virtual bases; we'll emit them later.
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ if (isBuildingConstructorVTable()) {
+ // Itanium C++ ABI 2.6.4:
+ // Some of the base class subobjects may not need construction virtual
+ // tables, which will therefore not be present in the construction
+ // virtual table group, even though the subobject virtual tables are
+ // present in the main virtual table group for the complete object.
+ if (!BaseIsMorallyVirtual && !BaseDecl->getNumVBases())
+ continue;
+ }
+
+ // Get the base offset of this base.
+ uint64_t RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
+
+ uint64_t BaseOffsetInLayoutClass = OffsetInLayoutClass + RelativeBaseOffset;
+
+ // Don't emit a secondary vtable for a primary base. We might however want
+ // to emit secondary vtables for other bases of this base.
+ if (BaseDecl == PrimaryBase) {
+ LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
+ continue;
+ }
+
+ // Layout the primary vtable (and any secondary vtables) for this base.
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual,
+ /*BaseIsVirtualInLayoutClass=*/false,
+ BaseOffsetInLayoutClass);
+ }
+}
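+
+// Illustrative only (hypothetical hierarchy): given
+//
+//   struct A { virtual void f(); };
+//   struct B { virtual void g(); };
+//   struct C : A, B { virtual void f(); virtual void g(); };
+//
+// A is C's primary base and shares C's primary vtable, so no secondary
+// vtable is emitted for it; B is not primary, so a secondary vtable for
+// B-in-C follows the primary one, with C::g entered through a
+// 'this'-adjusting thunk.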
+
+void
+VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ uint64_t OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Check if this base has a primary base.
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+
+ // Check if it's virtual.
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ bool IsPrimaryVirtualBase = true;
+
+ if (isBuildingConstructorVTable()) {
+ // Check if the base is actually a primary base in the class we use for
+ // layout.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ uint64_t PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ // We know that the base is not a primary base in the layout class if
+ // the base offsets are different.
+ if (PrimaryBaseOffsetInLayoutClass != OffsetInLayoutClass)
+ IsPrimaryVirtualBase = false;
+ }
+
+ if (IsPrimaryVirtualBase)
+ PrimaryVirtualBases.insert(PrimaryBase);
+ }
+ }
+
+ // Traverse bases, looking for more primary virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t BaseOffsetInLayoutClass;
+
+ if (I->isVirtual()) {
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffsetInLayoutClass = LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
+ }
+
+ DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
+ }
+}
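+
+// Illustrative only (hypothetical hierarchy): in
+//
+//   struct V { virtual void f(); };
+//   struct A : virtual V { virtual void f(); };
+//
+// V is nearly empty and is chosen as A's primary base even though it is
+// virtual, so it ends up in PrimaryVirtualBases and shares A's primary
+// vtable instead of getting a virtual base vtable of its own.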
+
+void
+VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ // Itanium C++ ABI 2.5.2:
+ // Then come the virtual base virtual tables, also in inheritance graph
+ // order, and again excluding primary bases (which share virtual tables with
+ // the classes for which they are primary).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this base needs a vtable. (If it's virtual, not a primary base
+ // of some other class, and we haven't visited it before).
+ if (I->isVirtual() && BaseDecl->isDynamicClass() &&
+ !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+ uint64_t BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+ uint64_t BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsMorallyVirtual=*/true,
+ /*BaseIsVirtualInLayoutClass=*/true,
+ BaseOffsetInLayoutClass);
+ }
+
+ // We only need to check the base for virtual base vtables if it actually
+ // has virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVTablesForVirtualBases(BaseDecl, VBases);
+ }
+}
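+
+// Illustrative only (hypothetical hierarchy): given
+//
+//   struct V1 { virtual void f(); };
+//   struct V2 { virtual void g(); };
+//   struct X { virtual void h(); };
+//   struct A : X, virtual V1, virtual V2 { };
+//
+// X is A's primary base, so neither V1 nor V2 is primary; their vtables
+// follow A's primary vtable in inheritance graph order (V1, then V2), and
+// VBases ensures each virtual base in a diamond is emitted only once.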
+
+/// dumpLayout - Dump the vtable layout.
+void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
+
+ if (isBuildingConstructorVTable()) {
+ Out << "Construction vtable for ('";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
+    // FIXME: Don't use / 8.
+ Out << MostDerivedClassOffset / 8 << ") in '";
+ Out << LayoutClass->getQualifiedNameAsString();
+ } else {
+ Out << "Vtable for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
+ }
+ Out << "' (" << Components.size() << " entries).\n";
+
+ // Iterate through the address points and insert them into a new map where
+ // they are keyed by the index and not the base object.
+ // Since an address point can be shared by multiple subobjects, we use an
+ // STL multimap.
+ std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
+ for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(),
+ E = AddressPoints.end(); I != E; ++I) {
+ const BaseSubobject& Base = I->first;
+ uint64_t Index = I->second;
+
+ AddressPointsByIndex.insert(std::make_pair(Index, Base));
+ }
+
+ for (unsigned I = 0, E = Components.size(); I != E; ++I) {
+ uint64_t Index = I;
+
+ Out << llvm::format("%4d | ", I);
+
+ const VTableComponent &Component = Components[I];
+
+ // Dump the component.
+ switch (Component.getKind()) {
+
+ case VTableComponent::CK_VCallOffset:
+ Out << "vcall_offset (" << Component.getVCallOffset() << ")";
+ break;
+
+ case VTableComponent::CK_VBaseOffset:
+ Out << "vbase_offset (" << Component.getVBaseOffset() << ")";
+ break;
+
+ case VTableComponent::CK_OffsetToTop:
+ Out << "offset_to_top (" << Component.getOffsetToTop() << ")";
+ break;
+
+ case VTableComponent::CK_RTTI:
+ Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
+ break;
+
+ case VTableComponent::CK_FunctionPointer: {
+ const CXXMethodDecl *MD = Component.getFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this function pointer has a return adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "\n [return adjustment: ";
+ Out << Thunk.Return.NonVirtual << " non-virtual";
+
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ Out << ']';
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ bool IsComplete =
+ Component.getKind() == VTableComponent::CK_CompleteDtorPointer;
+
+ const CXXDestructorDecl *DD = Component.getDestructorDecl();
+
+ Out << DD->getQualifiedNameAsString();
+ if (IsComplete)
+ Out << "() [complete]";
+ else
+ Out << "() [deleting]";
+
+ if (DD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this destructor has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer: {
+ const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << "[unused] " << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+ }
+
+ }
+
+ Out << '\n';
+
+ // Dump the next address point.
+ uint64_t NextIndex = Index + 1;
+ if (AddressPointsByIndex.count(NextIndex)) {
+ if (AddressPointsByIndex.count(NextIndex) == 1) {
+ const BaseSubobject &Base =
+ AddressPointsByIndex.find(NextIndex)->second;
+
+ // FIXME: Instead of dividing by 8, we should be using CharUnits.
+ Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
+ Out << ", " << Base.getBaseOffset() / 8 << ") vtable address --\n";
+ } else {
+ uint64_t BaseOffset =
+ AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
+
+ // We store the class names in a set to get a stable order.
+ std::set<std::string> ClassNames;
+ for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
+ AddressPointsByIndex.lower_bound(NextIndex), E =
+ AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) {
+ assert(I->second.getBaseOffset() == BaseOffset &&
+ "Invalid base offset!");
+ const CXXRecordDecl *RD = I->second.getBase();
+ ClassNames.insert(RD->getQualifiedNameAsString());
+ }
+
+ for (std::set<std::string>::const_iterator I = ClassNames.begin(),
+ E = ClassNames.end(); I != E; ++I) {
+ // FIXME: Instead of dividing by 8, we should be using CharUnits.
+ Out << " -- (" << *I;
+ Out << ", " << BaseOffset / 8 << ") vtable address --\n";
+ }
+ }
+ }
+ }
+
+ Out << '\n';
+
+ if (isBuildingConstructorVTable())
+ return;
+
+ if (MostDerivedClass->getNumVBases()) {
+ // We store the virtual base class names and their offsets in a map to get
+ // a stable order.
+
+ std::map<std::string, int64_t> ClassNamesAndOffsets;
+ for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
+ E = VBaseOffsetOffsets.end(); I != E; ++I) {
+ std::string ClassName = I->first->getQualifiedNameAsString();
+ int64_t OffsetOffset = I->second;
+ ClassNamesAndOffsets.insert(std::make_pair(ClassName, OffsetOffset));
+ }
+
+ Out << "Virtual base offset offsets for '";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "' (";
+ Out << ClassNamesAndOffsets.size();
+ Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (std::map<std::string, int64_t>::const_iterator I =
+ ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
+ I != E; ++I)
+ Out << " " << I->first << " | " << I->second << '\n';
+
+ Out << "\n";
+ }
+
+ if (!Thunks.empty()) {
+ // We store the method names in a map to get a stable order.
+ std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
+
+ for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
+ I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
+ }
+
+ for (std::map<std::string, const CXXMethodDecl *>::const_iterator I =
+ MethodNamesAndDecls.begin(), E = MethodNamesAndDecls.end();
+ I != E; ++I) {
+ const std::string &MethodName = I->first;
+ const CXXMethodDecl *MD = I->second;
+
+ ThunkInfoVectorTy ThunksVector = Thunks[MD];
+ std::sort(ThunksVector.begin(), ThunksVector.end());
+
+ Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
+ Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
+ const ThunkInfo &Thunk = ThunksVector[I];
+
+ Out << llvm::format("%4d | ", I);
+
+          // If this function pointer has a return adjustment, dump it.
+          if (!Thunk.Return.isEmpty()) {
+            Out << "return adjustment: " << Thunk.Return.NonVirtual;
+            Out << " non-virtual";
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ if (!Thunk.This.isEmpty())
+ Out << "\n ";
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+ }
+
+ Out << '\n';
+ }
+
+ Out << '\n';
+
+ }
+ }
+}
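+
+// For reference, a sketch of the output the code above produces for a
+// hypothetical 'struct A { virtual void f(); };' (x86-64 layout assumed):
+//
+//   Vtable for 'A' (3 entries).
+//      0 | offset_to_top (0)
+//      1 | A RTTI
+//          -- (A, 0) vtable address --
+//      2 | void A::f()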
+
+}
+
+void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
+
+ // Itanium C++ ABI 2.5.2:
+ // The order of the virtual function pointers in a virtual table is the
+ // order of declaration of the corresponding member functions in the class.
+ //
+ // There is an entry for any virtual function declared in a class,
+ // whether it is a new function or overrides a base class function,
+ // unless it overrides a function from the primary base, and conversion
+ // between their return types does not require an adjustment.
+
+ int64_t CurrentIndex = 0;
+
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (PrimaryBase) {
+ assert(PrimaryBase->isDefinition() &&
+ "Should have the definition decl of the primary base!");
+
+    // Since the record decl shares its vtable pointer with the primary base,
+    // we need to start counting at the end of the primary base's vtable.
+ CurrentIndex = getNumVirtualFunctionPointers(PrimaryBase);
+ }
+
+ // Collect all the primary bases, so we can check whether methods override
+ // a method from the base.
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+ for (ASTRecordLayout::primary_base_info_iterator
+ I = Layout.primary_base_begin(), E = Layout.primary_base_end();
+ I != E; ++I)
+ PrimaryBases.insert((*I).getBase());
+
+ const CXXDestructorDecl *ImplicitVirtualDtor = 0;
+
+ for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+ e = RD->method_end(); i != e; ++i) {
+ const CXXMethodDecl *MD = *i;
+
+ // We only want virtual methods.
+ if (!MD->isVirtual())
+ continue;
+
+ // Check if this method overrides a method in the primary base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+      // Check if converting from the return type of the method to the
+      // return type of the overridden method requires adjustment.
+ if (ComputeReturnAdjustmentBaseOffset(CGM.getContext(), MD,
+ OverriddenMD).isEmpty()) {
+        // This method shares the vtable index of the method it overrides
+        // in the primary base class.
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ const CXXDestructorDecl *OverriddenDD =
+ cast<CXXDestructorDecl>(OverriddenMD);
+
+ // Add both the complete and deleting entries.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Complete));
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting));
+ } else {
+ MethodVTableIndices[MD] = getMethodVTableIndex(OverriddenMD);
+ }
+
+ // We don't need to add an entry for this method.
+ continue;
+ }
+ }
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (MD->isImplicit()) {
+        assert(!ImplicitVirtualDtor &&
+               "Already saw an implicit virtual dtor!");
+ ImplicitVirtualDtor = DD;
+ continue;
+ }
+
+ // Add the complete dtor.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++;
+
+ // Add the deleting dtor.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++;
+ } else {
+ // Add the entry.
+ MethodVTableIndices[MD] = CurrentIndex++;
+ }
+ }
+
+ if (ImplicitVirtualDtor) {
+ // Itanium C++ ABI 2.5.2:
+ // If a class has an implicitly-defined virtual destructor,
+ // its entries come after the declared virtual function pointers.
+
+ // Add the complete dtor.
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] =
+ CurrentIndex++;
+
+ // Add the deleting dtor.
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] =
+ CurrentIndex++;
+ }
+
+ NumVirtualFunctionPointers[RD] = CurrentIndex;
+}
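+
+// Illustrative only (hypothetical class): indices follow declaration order,
+// and a user-declared virtual destructor occupies two consecutive slots:
+//
+//   struct A {
+//     virtual void f();  // index 0
+//     virtual ~A();      // index 1 (complete), index 2 (deleting)
+//     virtual void g();  // index 3
+//   };
+//
+// An implicitly-defined virtual destructor would instead get the last two
+// indices, after all of the declared virtual functions.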
+
+uint64_t
+CodeGenVTables::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+ NumVirtualFunctionPointers.find(RD);
+ if (I != NumVirtualFunctionPointers.end())
+ return I->second;
+
+ ComputeMethodVTableIndices(RD);
+
+ I = NumVirtualFunctionPointers.find(RD);
+ assert(I != NumVirtualFunctionPointers.end() && "Did not find entry!");
+ return I->second;
+}
+
+uint64_t CodeGenVTables::getMethodVTableIndex(GlobalDecl GD) {
+ MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
+ if (I != MethodVTableIndices.end())
+ return I->second;
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ ComputeMethodVTableIndices(RD);
+
+ I = MethodVTableIndices.find(GD);
+ assert(I != MethodVTableIndices.end() && "Did not find index!");
+ return I->second;
+}
+
+int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase) {
+ ClassPairTy ClassPair(RD, VBase);
+
+ VirtualBaseClassOffsetOffsetsMapTy::iterator I =
+ VirtualBaseClassOffsetOffsets.find(ClassPair);
+ if (I != VirtualBaseClassOffsetOffsets.end())
+ return I->second;
+
+ VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/0,
+ BaseSubobject(RD, 0),
+ /*BaseIsVirtual=*/false,
+ /*OffsetInLayoutClass=*/0);
+
+ for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ }
+
+ I = VirtualBaseClassOffsetOffsets.find(ClassPair);
+ assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!");
+
+ return I->second;
+}
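+
+// Illustrative only (hypothetical hierarchy, x86-64 layout assumed): for
+//
+//   struct V { virtual void f(); int x; };
+//   struct A : virtual V { virtual void g(); };
+//
+// A's primary vtable is laid out as [vbase_offset (V), offset_to_top, RTTI]
+// before the address point, so getVirtualBaseOffsetOffset(A, V) returns
+// -24: the vbase offset slot sits three pointers before the address point.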
+
+uint64_t
+CodeGenVTables::getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD) {
+ assert(AddressPoints.count(std::make_pair(RD, Base)) &&
+ "Did not find address point!");
+
+ uint64_t AddressPoint = AddressPoints.lookup(std::make_pair(RD, Base));
+ assert(AddressPoint && "Address point must not be zero!");
+
+ return AddressPoint;
+}
+
+llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Compute the mangled name.
+ llvm::SmallString<256> Name;
+ if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
+ getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), Thunk.This,
+ Name);
+ else
+ getMangleContext().mangleThunk(MD, Thunk, Name);
+
+ const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(MD);
+ return GetOrCreateLLVMFunction(Name, Ty, GD);
+}
+
+static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ int64_t NonVirtualAdjustment,
+ int64_t VirtualAdjustment) {
+ if (!NonVirtualAdjustment && !VirtualAdjustment)
+ return Ptr;
+
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+
+ llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+
+ if (NonVirtualAdjustment) {
+ // Do the non-virtual adjustment.
+ V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ }
+
+ if (VirtualAdjustment) {
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ // Do the virtual adjustment.
+ llvm::Value *VTablePtrPtr =
+ CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
+
+ llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+
+ llvm::Value *OffsetPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+
+ OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
+
+ // Load the adjustment offset from the vtable.
+ llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
+
+ // Adjust our pointer.
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset);
+ }
+
+ // Cast back to the original type.
+ return CGF.Builder.CreateBitCast(V, Ptr->getType());
+}
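+
+// In effect, the IR built above computes (illustrative pseudo-C):
+//
+//   char *p = (char *)Ptr + NonVirtualAdjustment;
+//   if (VirtualAdjustment) {
+//     char *vtable = *(char **)p;                      // load the vptr
+//     p += *(ptrdiff_t *)(vtable + VirtualAdjustment); // vcall/vbase offset
+//   }
+//
+// i.e. the virtual part indirects through the vtable to pick up an offset
+// that is only known at runtime.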
+
+void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+ QualType ThisType = MD->getThisType(getContext());
+
+ FunctionArgList FunctionArgs;
+
+ // FIXME: It would be nice if more of this code could be shared with
+ // CodeGenFunction::GenerateCode.
+
+ // Create the implicit 'this' parameter declaration.
+ CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
+ MD->getLocation(),
+ &getContext().Idents.get("this"),
+ ThisType);
+
+ // Add the 'this' parameter.
+ FunctionArgs.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *Param = *I;
+
+ FunctionArgs.push_back(std::make_pair(Param, Param->getType()));
+ }
+
+ StartFunction(GlobalDecl(), ResultType, Fn, FunctionArgs, SourceLocation());
+
+ // Adjust the 'this' pointer if necessary.
+ llvm::Value *AdjustedThisPtr =
+ PerformTypeAdjustment(*this, LoadCXXThis(),
+ Thunk.This.NonVirtual,
+ Thunk.This.VCallOffsetOffset);
+
+ CallArgList CallArgs;
+
+ // Add our adjusted 'this' pointer.
+ CallArgs.push_back(std::make_pair(RValue::get(AdjustedThisPtr), ThisType));
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *Param = *I;
+ QualType ArgType = Param->getType();
+ RValue Arg = EmitDelegateCallArg(Param);
+
+ CallArgs.push_back(std::make_pair(Arg, ArgType));
+ }
+
+ // Get our callee.
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
+ FPT->getExtInfo());
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);
+
+ if (!Thunk.Return.isEmpty()) {
+ // Emit the return adjustment.
+ bool NullCheckValue = !ResultType->isReferenceType();
+
+ llvm::BasicBlock *AdjustNull = 0;
+ llvm::BasicBlock *AdjustNotNull = 0;
+ llvm::BasicBlock *AdjustEnd = 0;
+
+ llvm::Value *ReturnValue = RV.getScalarVal();
+
+ if (NullCheckValue) {
+ AdjustNull = createBasicBlock("adjust.null");
+ AdjustNotNull = createBasicBlock("adjust.notnull");
+ AdjustEnd = createBasicBlock("adjust.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(ReturnValue);
+ Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
+ EmitBlock(AdjustNotNull);
+ }
+
+ ReturnValue = PerformTypeAdjustment(*this, ReturnValue,
+ Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(AdjustEnd);
+ EmitBlock(AdjustNull);
+ Builder.CreateBr(AdjustEnd);
+ EmitBlock(AdjustEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(ReturnValue->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(ReturnValue, AdjustNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
+ AdjustNull);
+ ReturnValue = PHI;
+ }
+
+ RV = RValue::get(ReturnValue);
+ }
+
+ if (!ResultType->isVoidType() && Slot.isNull())
+ EmitReturnOfRValue(RV, ResultType);
+
+ FinishFunction();
+
+ // Destroy the 'this' declaration.
+ CXXThisDecl->Destroy(getContext());
+
+ // Set the right linkage.
+ CGM.setFunctionLinkage(MD, Fn);
+
+ // Set the right visibility.
+ CGM.setGlobalVisibility(Fn, MD);
+}
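+
+// Illustrative only (hypothetical hierarchy): a thunk needing both
+// adjustments arises from a covariant return across a virtual base:
+//
+//   struct A { virtual A *clone(); int a; };
+//   struct B : virtual A { virtual B *clone(); };
+//
+// The thunk in the A-in-B vtable slot adjusts 'this' from the A subobject
+// to B, calls B::clone, and then applies the B* -> A* return adjustment,
+// skipping it when the result is null (the adjust.null / adjust.notnull
+// blocks above).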
+
+void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk) {
+ llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+  // There's already a declaration with the same name; check if it has the
+  // same type or if we need to replace it.
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() !=
+ CGM.getTypes().GetFunctionTypeForVTable(MD)) {
+ llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldThunkFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // Remove the name from the old thunk function and get a new thunk.
+ OldThunkFn->setName(llvm::StringRef());
+ Entry = CGM.GetAddrOfThunk(GD, Thunk);
+
+ // If needed, replace the old thunk with a bitcast.
+ if (!OldThunkFn->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
+ OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Remove the old thunk.
+ OldThunkFn->eraseFromParent();
+ }
+
+ // Actually generate the thunk body.
+ llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+ CodeGenFunction(CGM).GenerateThunk(ThunkFn, GD, Thunk);
+}
+
+void CodeGenVTables::EmitThunks(GlobalDecl GD) {
+ const CXXMethodDecl *MD =
+ cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
+
+ // We don't need to generate thunks for the base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return;
+
+ const CXXRecordDecl *RD = MD->getParent();
+
+ // Compute VTable related info for this class.
+ ComputeVTableRelatedInformation(RD);
+
+ ThunksMapTy::const_iterator I = Thunks.find(MD);
+ if (I == Thunks.end()) {
+ // We did not find a thunk for this method.
+ return;
+ }
+
+ const ThunkInfoVectorTy &ThunkInfoVector = I->second;
+ for (unsigned I = 0, E = ThunkInfoVector.size(); I != E; ++I)
+ EmitThunk(GD, ThunkInfoVector[I]);
+}
+
+void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
+ uint64_t *&LayoutData = VTableLayoutMap[RD];
+
+ // Check if we've computed this information before.
+ if (LayoutData)
+ return;
+
+ // We may need to generate a definition for this vtable.
+ if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
+ RD->getTemplateSpecializationKind()
+ != TSK_ExplicitInstantiationDeclaration)
+ CGM.DeferredVTables.push_back(RD);
+
+  VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/false, RD);
+
+ // Add the VTable layout.
+ uint64_t NumVTableComponents = Builder.getNumVTableComponents();
+ LayoutData = new uint64_t[NumVTableComponents + 1];
+
+ // Store the number of components.
+ LayoutData[0] = NumVTableComponents;
+
+ // Store the components.
+ std::copy(Builder.vtable_components_data_begin(),
+ Builder.vtable_components_data_end(),
+ &LayoutData[1]);
+
+ // Add the known thunks.
+ Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+ // Add the thunks needed in this vtable.
+  assert(!VTableThunksMap.count(RD) &&
+         "Thunks already exist for this vtable!");
+
+ VTableThunksTy &VTableThunks = VTableThunksMap[RD];
+ VTableThunks.append(Builder.vtable_thunks_begin(),
+ Builder.vtable_thunks_end());
+
+ // Sort them.
+ std::sort(VTableThunks.begin(), VTableThunks.end());
+
+ // Add the address points.
+ for (VTableBuilder::AddressPointsMapTy::const_iterator I =
+ Builder.address_points_begin(), E = Builder.address_points_end();
+ I != E; ++I) {
+
+ uint64_t &AddressPoint = AddressPoints[std::make_pair(RD, I->first)];
+
+ // Check if we already have the address points for this base.
+ assert(!AddressPoint && "Address point already exists for this base!");
+
+ AddressPoint = I->second;
+ }
+
+ // If we don't have the vbase information for this class, insert it.
+ // getVirtualBaseOffsetOffset will compute it separately without computing
+ // the rest of the vtable related information.
+ if (!RD->getNumVBases())
+ return;
+
+ const RecordType *VBaseRT =
+ RD->vbases_begin()->getType()->getAs<RecordType>();
+ const CXXRecordDecl *VBase = cast<CXXRecordDecl>(VBaseRT->getDecl());
+
+ if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
+ return;
+
+ for (VTableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ }
+}
+
+llvm::Constant *
+CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
+ const uint64_t *Components,
+ unsigned NumComponents,
+ const VTableThunksTy &VTableThunks) {
+ llvm::SmallVector<llvm::Constant *, 64> Inits;
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+
+ const llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ QualType ClassType = CGM.getContext().getTagDeclType(RD);
+ llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType);
+
+ unsigned NextVTableThunkIndex = 0;
+
+ llvm::Constant* PureVirtualFn = 0;
+
+ for (unsigned I = 0; I != NumComponents; ++I) {
+ VTableComponent Component =
+ VTableComponent::getFromOpaqueInteger(Components[I]);
+
+ llvm::Constant *Init = 0;
+
+ switch (Component.getKind()) {
+ case VTableComponent::CK_VCallOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVCallOffset());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_VBaseOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVBaseOffset());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_OffsetToTop:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getOffsetToTop());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_RTTI:
+ Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
+ break;
+ case VTableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ GlobalDecl GD;
+
+ // Get the right global decl.
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind");
+ case VTableComponent::CK_FunctionPointer:
+ GD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
+ break;
+ }
+
+ if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
+ // We have a pure virtual member function.
+ if (!PureVirtualFn) {
+ const llvm::FunctionType *Ty =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ /*isVarArg=*/false);
+ PureVirtualFn =
+ CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ Int8PtrTy);
+ }
+
+ Init = PureVirtualFn;
+ } else {
+ // Check if we should use a thunk.
+ if (NextVTableThunkIndex < VTableThunks.size() &&
+ VTableThunks[NextVTableThunkIndex].first == I) {
+ const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
+
+ Init = CGM.GetAddrOfThunk(GD, Thunk);
+
+ NextVTableThunkIndex++;
+ } else {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(MD);
+
+ Init = CGM.GetAddrOfFunction(GD, Ty);
+ }
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ }
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer:
+ Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
+ break;
+    }
+
+ Inits.push_back(Init);
+ }
+
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
+ return llvm::ConstantArray::get(ArrayType, Inits.data(), Inits.size());
+}
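+
+// For reference, a sketch of the constant this builds for a hypothetical
+// 'struct A { virtual void f(); };' (standard Itanium manglings assumed):
+//
+//   @_ZTV1A = constant [3 x i8*] [
+//     i8* null,                            ; offset_to_top (0) folds to null
+//     i8* bitcast (... @_ZTI1A to i8*),    ; RTTI descriptor
+//     i8* bitcast (... @_ZN1A1fEv to i8*)  ; A::f
+//   ]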
+
+/// GetGlobalVariable - Will return a global variable of the given type.
+/// If a variable with a different type already exists then a new variable
+/// with the right type will be created.
+/// FIXME: We should move this to CodeGenModule and rename it to something
+/// better and then use it in CGVTT and CGRTTI.
+static llvm::GlobalVariable *
+GetGlobalVariable(llvm::Module &Module, llvm::StringRef Name,
+ const llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+
+ llvm::GlobalVariable *GV = Module.getNamedGlobal(Name);
+ llvm::GlobalVariable *OldGV = 0;
+
+ if (GV) {
+ // Check if the variable has the right type.
+ if (GV->getType()->getElementType() == Ty)
+ return GV;
+
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
+
+ OldGV = GV;
+ }
+
+ // Create a new variable.
+ GV = new llvm::GlobalVariable(Module, Ty, /*isConstant=*/true,
+ Linkage, 0, Name);
+
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV->takeName(OldGV);
+
+ if (!OldGV->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ OldGV->eraseFromParent();
+ }
+
+ return GV;
+}
+
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXVTable(RD, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ ComputeVTableRelatedInformation(RD);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, getNumVTableComponents(RD));
+
+ return GetGlobalVariable(CGM.getModule(), Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+}
+
+void
+CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ // Dump the vtable layout if necessary.
+ if (CGM.getLangOptions().DumpVTableLayouts) {
+    VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/false,
+                          RD);
+
+ Builder.dumpLayout(llvm::errs());
+ }
+
+ assert(VTableThunksMap.count(RD) &&
+ "No thunk status for this record decl!");
+
+ const VTableThunksTy& Thunks = VTableThunksMap[RD];
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(RD, getVTableComponentsData(RD),
+ getNumVTableComponents(RD), Thunks);
+ VTable->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTable->setLinkage(Linkage);
+}
+
+llvm::GlobalVariable *
+CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ VTableAddressPointsMapTy& AddressPoints) {
+ VTableBuilder Builder(*this, Base.getBase(), Base.getBaseOffset(),
+ /*MostDerivedClassIsVirtual=*/BaseIsVirtual, RD);
+
+ // Dump the vtable layout if necessary.
+ if (CGM.getLangOptions().DumpVTableLayouts)
+ Builder.dumpLayout(llvm::errs());
+
+ // Add the address points.
+ AddressPoints.insert(Builder.address_points_begin(),
+ Builder.address_points_end());
+
+ // Get the mangled construction vtable name.
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8,
+ Base.getBase(), OutName);
+ llvm::StringRef Name = OutName.str();
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getNumVTableComponents());
+
+ // Create the variable that will hold the construction vtable.
+ llvm::GlobalVariable *VTable =
+ GetGlobalVariable(CGM.getModule(), Name, ArrayType,
+ llvm::GlobalValue::InternalLinkage);
+
+ // Add the thunks.
+ VTableThunksTy VTableThunks;
+ VTableThunks.append(Builder.vtable_thunks_begin(),
+ Builder.vtable_thunks_end());
+
+ // Sort them.
+ std::sort(VTableThunks.begin(), VTableThunks.end());
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(Base.getBase(),
+ Builder.vtable_components_data_begin(),
+ Builder.getNumVTableComponents(), VTableThunks);
+ VTable->setInitializer(Init);
+
+ return VTable;
+}
+
+void
+CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *&VTable = VTables[RD];
+ if (VTable) {
+ assert(VTable->getInitializer() && "VTable doesn't have a definition!");
+ return;
+ }
+
+ VTable = GetAddrOfVTable(RD);
+ EmitVTableDefinition(VTable, Linkage, RD);
+
+ GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
+
+ // If this is the magic class __cxxabiv1::__fundamental_type_info,
+ // we will emit the typeinfo for the fundamental types. This is the
+ // same behaviour as GCC.
+ const DeclContext *DC = RD->getDeclContext();
+ if (RD->getIdentifier() &&
+ RD->getIdentifier()->isStr("__fundamental_type_info") &&
+ isa<NamespaceDecl>(DC) &&
+ cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
+ DC->getParent()->isTranslationUnit())
+ CGM.EmitFundamentalRTTIDescriptors();
+}
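+
+// Illustrative only: the __fundamental_type_info check above matches the
+// C++ ABI library's definition, roughly
+//
+//   namespace __cxxabiv1 {
+//     class __fundamental_type_info : public std::type_info {
+//     public:
+//       virtual ~__fundamental_type_info();
+//     };
+//   }
+//
+// so emitting its vtable also emits the RTTI descriptors for all the
+// fundamental types, matching GCC.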
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
new file mode 100644
index 0000000..e55377f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
@@ -0,0 +1,364 @@
+//===--- CGVTables.h - Emit LLVM Code for C++ vtables ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVTABLE_H
+#define CLANG_CODEGEN_CGVTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/GlobalVariable.h"
+#include "GlobalDecl.h"
+
+namespace clang {
+ class CXXRecordDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+/// ReturnAdjustment - A return adjustment.
+struct ReturnAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+  /// VBaseOffsetOffset - The offset (in bytes), relative to the address point,
+  /// of the virtual base class offset.
+ int64_t VBaseOffsetOffset;
+
+ ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
+
+ friend bool operator==(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset;
+ }
+
+ friend bool operator<(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset < RHS.VBaseOffsetOffset;
+ }
+};
+
+/// ThisAdjustment - A 'this' pointer adjustment.
+struct ThisAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+ /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
+ /// of the virtual call offset.
+ int64_t VCallOffsetOffset;
+
+ ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
+
+ friend bool operator==(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset == RHS.VCallOffsetOffset;
+ }
+
+ friend bool operator<(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset < RHS.VCallOffsetOffset;
+ }
+};
+
+/// ThunkInfo - The 'this' pointer adjustment as well as an optional return
+/// adjustment for a thunk.
+struct ThunkInfo {
+ /// This - The 'this' pointer adjustment.
+ ThisAdjustment This;
+
+ /// Return - The return adjustment.
+ ReturnAdjustment Return;
+
+ ThunkInfo() { }
+
+ ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
+ : This(This), Return(Return) { }
+
+ friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ return LHS.This == RHS.This && LHS.Return == RHS.Return;
+ }
+
+ friend bool operator<(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ if (LHS.This < RHS.This)
+ return true;
+
+ return LHS.This == RHS.This && LHS.Return < RHS.Return;
+ }
+
+ bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
+};
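+
+// Illustrative only: for 'struct C : A, B' where both bases have just a
+// vptr (x86-64 assumed), the thunk entering C::g through the B-in-C vtable
+// carries a ThunkInfo with This.NonVirtual == -8 (sliding 'this' from the
+// B subobject back to C) and an empty return adjustment.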
+
+/// BaseSubobject - Uniquely identifies a direct or indirect base class.
+/// Stores both the base class decl and the offset from the most derived class
+/// to the base class.
+class BaseSubobject {
+ /// Base - The base class declaration.
+ const CXXRecordDecl *Base;
+
+ /// BaseOffset - The offset from the most derived class to the base class.
+ uint64_t BaseOffset;
+
+public:
+ BaseSubobject(const CXXRecordDecl *Base, uint64_t BaseOffset)
+ : Base(Base), BaseOffset(BaseOffset) { }
+
+ /// getBase - Returns the base class declaration.
+ const CXXRecordDecl *getBase() const { return Base; }
+
+ /// getBaseOffset - Returns the base class offset.
+ uint64_t getBaseOffset() const { return BaseOffset; }
+
+ friend bool operator==(const BaseSubobject &LHS, const BaseSubobject &RHS) {
+ return LHS.Base == RHS.Base && LHS.BaseOffset == RHS.BaseOffset;
+ }
+};
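+
+// Note: as the '/ 8' conversions in CGVTables.cpp suggest, BaseOffset is
+// currently a bit offset; e.g. a base at byte offset 8 inside the most
+// derived class is identified as BaseSubobject(Base, 64).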
+
+} // end namespace CodeGen
+} // end namespace clang
+
+namespace llvm {
+
+template<> struct DenseMapInfo<clang::CodeGen::BaseSubobject> {
+ static clang::CodeGen::BaseSubobject getEmptyKey() {
+ return clang::CodeGen::BaseSubobject(
+ DenseMapInfo<const clang::CXXRecordDecl *>::getEmptyKey(),
+ DenseMapInfo<uint64_t>::getEmptyKey());
+ }
+
+ static clang::CodeGen::BaseSubobject getTombstoneKey() {
+ return clang::CodeGen::BaseSubobject(
+ DenseMapInfo<const clang::CXXRecordDecl *>::getTombstoneKey(),
+ DenseMapInfo<uint64_t>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const clang::CodeGen::BaseSubobject &Base) {
+ return
+ DenseMapInfo<const clang::CXXRecordDecl *>::getHashValue(Base.getBase()) ^
+ DenseMapInfo<uint64_t>::getHashValue(Base.getBaseOffset());
+ }
+
+ static bool isEqual(const clang::CodeGen::BaseSubobject &LHS,
+ const clang::CodeGen::BaseSubobject &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// It's OK to treat BaseSubobject as a POD type.
+template <> struct isPodLike<clang::CodeGen::BaseSubobject> {
+ static const bool value = true;
+};
+
+}
+
+namespace clang {
+namespace CodeGen {
+
+class CodeGenVTables {
+ CodeGenModule &CGM;
+
+ /// MethodVTableIndices - Contains the index (relative to the vtable address
+ /// point) where the function pointer for a virtual function is stored.
+ typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;
+ MethodVTableIndicesTy MethodVTableIndices;
+
+ typedef std::pair<const CXXRecordDecl *,
+ const CXXRecordDecl *> ClassPairTy;
+
+ /// VirtualBaseClassOffsetOffsets - Contains the vtable offset (relative to
+ /// the address point) in bytes where the offsets for virtual bases of a class
+ /// are stored.
+ typedef llvm::DenseMap<ClassPairTy, int64_t>
+ VirtualBaseClassOffsetOffsetsMapTy;
+ VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets;
+
+ /// VTables - All the vtables which have been defined.
+ llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
+
+ /// NumVirtualFunctionPointers - Contains the number of virtual function
+ /// pointers in the vtable for a given record decl.
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers;
+
+ typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - Contains all thunks that a given method decl will need.
+ ThunksMapTy Thunks;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t *> VTableLayoutMapTy;
+
+ /// VTableLayoutMap - Stores the vtable layout for all record decls.
+ /// The layout is stored as an array of 64-bit integers, where the first
+ /// integer is the number of vtable entries in the layout, and the subsequent
+ /// integers are the vtable components.
+ VTableLayoutMapTy VTableLayoutMap;
+
+ typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> AddressPointsMapTy;
+
+  /// AddressPoints - Address points for all vtables.
+ AddressPointsMapTy AddressPoints;
+
+ /// VTableAddressPointsMapTy - Address points for a single vtable.
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
+
+ typedef llvm::SmallVector<std::pair<uint64_t, ThunkInfo>, 1>
+ VTableThunksTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, VTableThunksTy>
+ VTableThunksMapTy;
+
+ /// VTableThunksMap - Contains thunks needed by vtables.
+ VTableThunksMapTy VTableThunksMap;
+
+ uint64_t getNumVTableComponents(const CXXRecordDecl *RD) const {
+ assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
+
+ return VTableLayoutMap.lookup(RD)[0];
+ }
+
+ const uint64_t *getVTableComponentsData(const CXXRecordDecl *RD) const {
+ assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
+
+ uint64_t *Components = VTableLayoutMap.lookup(RD);
+ return &Components[1];
+ }
+
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
+
+ /// SubVTTIndicies - Contains indices into the various sub-VTTs.
+ SubVTTIndiciesMapTy SubVTTIndicies;
+
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t>
+ SecondaryVirtualPointerIndicesMapTy;
+
+ /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer
+ /// indices.
+ SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
+
+ /// getNumVirtualFunctionPointers - Return the number of virtual function
+ /// pointers in the vtable for a given record decl.
+ uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
+
+ void ComputeMethodVTableIndices(const CXXRecordDecl *RD);
+
+ llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+ bool GenerateDefinition,
+ const CXXRecordDecl *RD);
+
+ /// EmitThunk - Emit a single thunk.
+ void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// ComputeVTableRelatedInformation - Compute and store all vtable related
+ /// information (vtable layout, vbase offset offsets, thunks etc) for the
+ /// given record decl.
+ void ComputeVTableRelatedInformation(const CXXRecordDecl *RD);
+
+ /// CreateVTableInitializer - Create a vtable initializer for the given record
+ /// decl.
+ /// \param Components - The vtable components; this is really an array of
+ /// VTableComponents.
+ llvm::Constant *CreateVTableInitializer(const CXXRecordDecl *RD,
+ const uint64_t *Components,
+ unsigned NumComponents,
+ const VTableThunksTy &VTableThunks);
+
+public:
+ CodeGenVTables(CodeGenModule &CGM)
+ : CGM(CGM) { }
+
+  /// isKeyFunctionInAnotherTU - True if this record has a key function and it
+  /// is in another translation unit.
+ static bool isKeyFunctionInAnotherTU(ASTContext &Context,
+ const CXXRecordDecl *RD) {
+    assert(RD->isDynamicClass() && "Non-dynamic classes have no key.");
+ const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
+ return KeyFunction && !KeyFunction->getBody();
+ }
+
+ /// needsVTTParameter - Return whether the given global decl needs a VTT
+ /// parameter, which it does if it's a base constructor or destructor with
+ /// virtual bases.
+ static bool needsVTTParameter(GlobalDecl GD);
+
+ /// getSubVTTIndex - Return the index of the sub-VTT for the base class of the
+ /// given record decl.
+ uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base);
+
+ /// getSecondaryVirtualPointerIndex - Return the index in the VTT where the
+ /// virtual pointer for the given subobject is located.
+ uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base);
+
+ /// getMethodVTableIndex - Return the index (relative to the vtable address
+ /// point) where the function pointer for the given virtual function is
+ /// stored.
+ uint64_t getMethodVTableIndex(GlobalDecl GD);
+
+ /// getVirtualBaseOffsetOffset - Return the offset in bytes (relative to the
+  /// vtable address point) where the offset of the virtual base that contains
+  /// the given base is stored; otherwise, if no virtual base contains the given
+  /// class, return 0. Base must be a virtual base class or an unambiguous
+ /// base.
+ int64_t getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase);
+
+ /// getAddressPoint - Get the address point of the given subobject in the
+ /// class decl.
+ uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD);
+
+ /// GetAddrOfVTable - Get the address of the vtable for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTable(const CXXRecordDecl *RD);
+
+ /// EmitVTableDefinition - Emit the definition of the given vtable.
+ void EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+
+ /// GenerateConstructionVTable - Generate a construction vtable for the given
+ /// base subobject.
+ llvm::GlobalVariable *
+ GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ VTableAddressPointsMapTy& AddressPoints);
+
+ llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
+
+ /// EmitThunks - Emit the associated thunks for the given global decl.
+ void EmitThunks(GlobalDecl GD);
+
+  /// GenerateClassData - Generate all the class data required when the key
+  /// function of a class is defined. This includes the vtable, the
+  /// RTTI data structure and the VTT.
+ ///
+ /// \param Linkage - The desired linkage of the vtable, the RTTI and the VTT.
+ void GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
new file mode 100644
index 0000000..92ef9dc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
@@ -0,0 +1,308 @@
+//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around llvm::Value in order to
+// fully represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVALUE_H
+#define CLANG_CODEGEN_CGVALUE_H
+
+#include "clang/AST/Type.h"
+
+namespace llvm {
+ class Constant;
+ class Value;
+}
+
+namespace clang {
+ class ObjCPropertyRefExpr;
+ class ObjCImplicitSetterGetterRefExpr;
+
+namespace CodeGen {
+ class CGBitFieldInfo;
+
+/// RValue - This trivial value class is used to represent the result of an
+/// expression that is evaluated. It can be one of three things: either a
+/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
+class RValue {
+ enum Flavor { Scalar, Complex, Aggregate };
+
+ // Stores first value and flavor.
+ llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
+ // Stores second value and volatility.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
+
+public:
+ bool isScalar() const { return V1.getInt() == Scalar; }
+ bool isComplex() const { return V1.getInt() == Complex; }
+ bool isAggregate() const { return V1.getInt() == Aggregate; }
+
+ bool isVolatileQualified() const { return V2.getInt(); }
+
+ /// getScalarVal() - Return the Value* of this scalar value.
+ llvm::Value *getScalarVal() const {
+ assert(isScalar() && "Not a scalar!");
+ return V1.getPointer();
+ }
+
+ /// getComplexVal - Return the real/imag components of this complex value.
+ ///
+ std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
+ return std::make_pair(V1.getPointer(), V2.getPointer());
+ }
+
+ /// getAggregateAddr() - Return the Value* of the address of the aggregate.
+ llvm::Value *getAggregateAddr() const {
+ assert(isAggregate() && "Not an aggregate!");
+ return V1.getPointer();
+ }
+
+ static RValue get(llvm::Value *V) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Scalar);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
+ RValue ER;
+ ER.V1.setPointer(V1);
+ ER.V2.setPointer(V2);
+ ER.V1.setInt(Complex);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
+ return getComplex(C.first, C.second);
+ }
+ // FIXME: Aggregate rvalues need to retain information about whether they are
+ // volatile or not. Remove default to find all places that probably get this
+ // wrong.
+ static RValue getAggregate(llvm::Value *V, bool Volatile = false) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Aggregate);
+ ER.V2.setInt(Volatile);
+ return ER;
+ }
+};
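+
+// Typical construction (illustrative):
+//
+//   RValue::get(Scalar)                           - a simple scalar result
+//   RValue::getComplex(Real, Imag)                - a complex value, two scalars
+//   RValue::getAggregate(Addr, /*Volatile=*/true) - address of an aggregate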
+
+
+/// LValue - This represents an lvalue reference. Because C/C++ allow
+/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
+/// bit range.
+class LValue {
+ // FIXME: alignment?
+
+ enum {
+ Simple, // This is a normal l-value, use getAddress().
+ VectorElt, // This is a vector element l-value (V[i]), use getVector*
+ BitField, // This is a bitfield l-value, use getBitfield*.
+    ExtVectorElt, // This is an extended vector subset, use getExtVector*
+ PropertyRef, // This is an Objective-C property reference, use
+ // getPropertyRefExpr
+ KVCRef // This is an objective-c 'implicit' property ref,
+ // use getKVCRefExpr
+ } LVType;
+
+ llvm::Value *V;
+
+ union {
+ // Index into a vector subscript: V[i]
+ llvm::Value *VectorIdx;
+
+ // ExtVector element subset: V.xyx
+ llvm::Constant *VectorElts;
+
+ // BitField start bit and size
+ const CGBitFieldInfo *BitFieldInfo;
+
+ // Obj-C property reference expression
+ const ObjCPropertyRefExpr *PropertyRefExpr;
+
+ // ObjC 'implicit' property reference expression
+ const ObjCImplicitSetterGetterRefExpr *KVCRefExpr;
+ };
+
+  // The object's qualifiers; note that the 'const' bit is never consulted.
+  Qualifiers Quals;
+
+  // Whether this lvalue is an Objective-C ivar.
+  bool Ivar : 1;
+
+  // Whether the Objective-C ivar is an array.
+  bool ObjIsArray : 1;
+
+  // The lvalue is non-GC'able for any reason, including being a parameter or
+  // local variable.
+  bool NonGC : 1;
+
+  // The lvalue is a global reference to an Objective-C object.
+  bool GlobalObjCRef : 1;
+
+ Expr *BaseIvarExp;
+private:
+ void SetQualifiers(Qualifiers Quals) {
+ this->Quals = Quals;
+
+ // FIXME: Convenient place to set objc flags to 0. This should really be
+ // done in a user-defined constructor instead.
+ this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
+ this->BaseIvarExp = 0;
+ }
+
+public:
+ bool isSimple() const { return LVType == Simple; }
+ bool isVectorElt() const { return LVType == VectorElt; }
+ bool isBitField() const { return LVType == BitField; }
+ bool isExtVectorElt() const { return LVType == ExtVectorElt; }
+ bool isPropertyRef() const { return LVType == PropertyRef; }
+ bool isKVCRef() const { return LVType == KVCRef; }
+
+ bool isVolatileQualified() const { return Quals.hasVolatile(); }
+ bool isRestrictQualified() const { return Quals.hasRestrict(); }
+ unsigned getVRQualifiers() const {
+ return Quals.getCVRQualifiers() & ~Qualifiers::Const;
+ }
+
+ bool isObjCIvar() const { return Ivar; }
+ bool isObjCArray() const { return ObjIsArray; }
+ bool isNonGC () const { return NonGC; }
+ bool isGlobalObjCRef() const { return GlobalObjCRef; }
+ bool isObjCWeak() const { return Quals.getObjCGCAttr() == Qualifiers::Weak; }
+ bool isObjCStrong() const { return Quals.getObjCGCAttr() == Qualifiers::Strong; }
+
+ Expr *getBaseIvarExp() const { return BaseIvarExp; }
+ void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
+
+ unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
+
+ static void SetObjCIvar(LValue& R, bool iValue) {
+ R.Ivar = iValue;
+ }
+ static void SetObjCArray(LValue& R, bool iValue) {
+ R.ObjIsArray = iValue;
+ }
+ static void SetGlobalObjCRef(LValue& R, bool iValue) {
+ R.GlobalObjCRef = iValue;
+ }
+
+ static void SetObjCNonGC(LValue& R, bool iValue) {
+ R.NonGC = iValue;
+ }
+
+ // simple lvalue
+ llvm::Value *getAddress() const { assert(isSimple()); return V; }
+
+ // vector elt lvalue
+ llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
+ llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+
+ // extended vector elements.
+ llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
+ llvm::Constant *getExtVectorElts() const {
+ assert(isExtVectorElt());
+ return VectorElts;
+ }
+
+ // bitfield lvalue
+ llvm::Value *getBitFieldBaseAddr() const {
+ assert(isBitField());
+ return V;
+ }
+ const CGBitFieldInfo &getBitFieldInfo() const {
+ assert(isBitField());
+ return *BitFieldInfo;
+ }
+
+ // property ref lvalue
+ const ObjCPropertyRefExpr *getPropertyRefExpr() const {
+ assert(isPropertyRef());
+ return PropertyRefExpr;
+ }
+
+ // 'implicit' property ref lvalue
+ const ObjCImplicitSetterGetterRefExpr *getKVCRefExpr() const {
+ assert(isKVCRef());
+ return KVCRefExpr;
+ }
+
+ static LValue MakeAddr(llvm::Value *V, Qualifiers Quals) {
+ LValue R;
+ R.LVType = Simple;
+ R.V = V;
+ R.SetQualifiers(Quals);
+ return R;
+ }
+
+ static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = VectorElt;
+ R.V = Vec;
+ R.VectorIdx = Idx;
+ R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+
+ static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = ExtVectorElt;
+ R.V = Vec;
+ R.VectorElts = Elts;
+ R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+
+ /// \brief Create a new object to represent a bit-field access.
+ ///
+ /// \param BaseValue - The base address of the structure containing the
+ /// bit-field.
+ /// \param Info - The information describing how to perform the bit-field
+ /// access.
+ static LValue MakeBitfield(llvm::Value *BaseValue, const CGBitFieldInfo &Info,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = BitField;
+ R.V = BaseValue;
+ R.BitFieldInfo = &Info;
+ R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+
+ // FIXME: It is probably bad that we aren't emitting the target when we build
+ // the lvalue. However, this complicates the code a bit, and I haven't figured
+ // out how to make it go wrong yet.
+ static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = PropertyRef;
+ R.PropertyRefExpr = E;
+ R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+
+ static LValue MakeKVCRef(const ObjCImplicitSetterGetterRefExpr *E,
+ unsigned CVR) {
+ LValue R;
+ R.LVType = KVCRef;
+ R.KVCRefExpr = E;
+ R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ return R;
+ }
+};
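+// Illustrative usage sketch (editorial note; not part of the original
+// commit). Ptr, BasePtr, Quals and Info stand in for values a caller would
+// already have:
+//
+//   LValue LV = LValue::MakeAddr(Ptr, Quals);     // LV.isSimple()
+//   llvm::Value *Addr = LV.getAddress();
+//
+//   LValue BF = LValue::MakeBitfield(BasePtr, Info, Qualifiers::Volatile);
+//   const CGBitFieldInfo &BFI = BF.getBitFieldInfo();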
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
new file mode 100644
index 0000000..a226400
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
@@ -0,0 +1,37 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangCodeGen
+ CGBlocks.cpp
+ CGBuiltin.cpp
+ CGCall.cpp
+ CGClass.cpp
+ CGCXX.cpp
+ CGDebugInfo.cpp
+ CGDecl.cpp
+ CGDeclCXX.cpp
+ CGException.cpp
+ CGExpr.cpp
+ CGExprAgg.cpp
+ CGExprComplex.cpp
+ CGExprConstant.cpp
+ CGExprCXX.cpp
+ CGExprScalar.cpp
+ CGObjC.cpp
+ CGObjCGNU.cpp
+ CGObjCMac.cpp
+ CGRecordLayoutBuilder.cpp
+ CGRTTI.cpp
+ CGStmt.cpp
+ CGTemporaries.cpp
+ CGVTables.cpp
+ CGVTT.cpp
+ CodeGenFunction.cpp
+ CodeGenModule.cpp
+ CodeGenTypes.cpp
+ ItaniumCXXABI.cpp
+ Mangle.cpp
+ ModuleBuilder.cpp
+ TargetInfo.cpp
+ )
+
+add_dependencies(clangCodeGen ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
new file mode 100644
index 0000000..73de0fd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -0,0 +1,835 @@
+//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-function state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+ : BlockFunction(cgm, *this, Builder), CGM(cgm),
+ Target(CGM.getContext().Target),
+ Builder(cgm.getModule().getContext()),
+ DebugInfo(0), IndirectBranch(0),
+ SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+ CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
+ ConditionalBranchLevel(0), TerminateHandler(0), TrapBB(0),
+ UniqueAggrDestructorCount(0) {
+ LLVMIntTy = ConvertType(getContext().IntTy);
+ LLVMPointerWidth = Target.getPointerWidth(0);
+ Exceptions = getContext().getLangOptions().Exceptions;
+ CatchUndefined = getContext().getLangOptions().CatchUndefined;
+ CGM.getMangleContext().startNewFunction();
+}
+
+ASTContext &CodeGenFunction::getContext() const {
+ return CGM.getContext();
+}
+
+
+llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
+ llvm::BasicBlock *&BB = LabelMap[S];
+ if (BB) return BB;
+
+ // Create, but don't insert, the new block.
+ return BB = createBasicBlock(S->getName());
+}
+
+llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
+ llvm::Value *Res = LocalDeclMap[VD];
+ assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return Res;
+}
+
+llvm::Constant *
+CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
+ return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
+}
+
+const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+ return CGM.getTypes().ConvertTypeForMem(T);
+}
+
+const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+}
+
+bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
+ return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
+ T->isMemberFunctionPointerType();
+}
+
+void CodeGenFunction::EmitReturnBlock() {
+ // For cleanliness, we try to avoid emitting the return block for
+ // simple cases.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (CurBB) {
+ assert(!CurBB->getTerminator() && "Unexpected terminated block.");
+
+ // We have a valid insert point, reuse it if it is empty or there are no
+ // explicit jumps to the return block.
+ if (CurBB->empty() || ReturnBlock->use_empty()) {
+ ReturnBlock->replaceAllUsesWith(CurBB);
+ delete ReturnBlock;
+ } else
+ EmitBlock(ReturnBlock);
+ return;
+ }
+
+ // Otherwise, if the return block is the target of a single direct
+ // branch then we can just put the code in that block instead. This
+ // cleans up functions which started with a unified return block.
+ if (ReturnBlock->hasOneUse()) {
+ llvm::BranchInst *BI =
+ dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
+ if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
+ // Reset insertion point and delete the branch.
+ Builder.SetInsertPoint(BI->getParent());
+ BI->eraseFromParent();
+ delete ReturnBlock;
+ return;
+ }
+ }
+
+  // FIXME: We are at an unreachable point; there is no reason to emit the
+  // block unless it has uses. However, we still need a place to put the debug
+  // region.end for now.
+
+ EmitBlock(ReturnBlock);
+}
+
+void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
+ assert(BreakContinueStack.empty() &&
+ "mismatched push/pop in break/continue stack!");
+ assert(BlockScopes.empty() &&
+ "did not remove all blocks from block scope map!");
+ assert(CleanupEntries.empty() &&
+ "mismatched push/pop in cleanup stack!");
+
+ // Emit function epilog (to return).
+ EmitReturnBlock();
+
+ // Emit debug descriptor for function end.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(EndLoc);
+ DI->EmitRegionEnd(CurFn, Builder);
+ }
+
+ EmitFunctionEpilog(*CurFnInfo, ReturnValue);
+ EmitEndEHSpec(CurCodeDecl);
+
+ // If someone did an indirect goto, emit the indirect goto block at the end of
+ // the function.
+ if (IndirectBranch) {
+ EmitBlock(IndirectBranch->getParent());
+ Builder.ClearInsertionPoint();
+ }
+
+ // Remove the AllocaInsertPt instruction, which is just a convenience for us.
+ llvm::Instruction *Ptr = AllocaInsertPt;
+ AllocaInsertPt = 0;
+ Ptr->eraseFromParent();
+
+ // If someone took the address of a label but never did an indirect goto, we
+ // made a zero entry PHI node, which is illegal, zap it now.
+ if (IndirectBranch) {
+ llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
+ if (PN->getNumIncomingValues() == 0) {
+ PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
+ PN->eraseFromParent();
+ }
+ }
+}
+
+void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
+ llvm::Function *Fn,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc) {
+ const Decl *D = GD.getDecl();
+
+ DidCallStackSave = false;
+ CurCodeDecl = CurFuncDecl = D;
+ FnRetTy = RetTy;
+ CurFn = Fn;
+ assert(CurFn->isDeclaration() && "Function already has body?");
+
+ // Pass inline keyword to optimizer if it appears explicitly on any
+ // declaration.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
+ RE = FD->redecls_end(); RI != RE; ++RI)
+ if (RI->isInlineSpecified()) {
+ Fn->addFnAttr(llvm::Attribute::InlineHint);
+ break;
+ }
+
+ llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
+
+  // Create a marker to make it easy to insert allocas into the entry block
+ // later. Don't create this with the builder, because we don't want it
+ // folded.
+ llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext));
+ AllocaInsertPt = new llvm::BitCastInst(Undef,
+ llvm::Type::getInt32Ty(VMContext), "",
+ EntryBB);
+ if (Builder.isNamePreserving())
+ AllocaInsertPt->setName("allocapt");
+
+ ReturnBlock = createBasicBlock("return");
+
+ Builder.SetInsertPoint(EntryBB);
+
+ QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
+ false, false, 0, 0,
+ /*FIXME?*/
+ FunctionType::ExtInfo());
+
+ // Emit subprogram debug descriptor.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(StartLoc);
+ DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
+ }
+
+ // FIXME: Leaked.
+ // CC info is ignored, hopefully?
+ CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
+ FunctionType::ExtInfo());
+
+ if (RetTy->isVoidType()) {
+ // Void type; nothing to return.
+ ReturnValue = 0;
+ } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType())) {
+ // Indirect aggregate return; emit returned value directly into sret slot.
+ // This reduces code size, and affects correctness in C++.
+ ReturnValue = CurFn->arg_begin();
+ } else {
+ ReturnValue = CreateIRTemp(RetTy, "retval");
+ }
+
+ EmitStartEHSpec(CurCodeDecl);
+ EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+
+ if (CXXThisDecl)
+ CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
+ if (CXXVTTDecl)
+ CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");
+
+ // If any of the arguments have a variably modified type, make sure to
+ // emit the type size.
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ QualType Ty = i->second;
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ }
+}
+
+void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
+ const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
+ assert(FD->getBody());
+ EmitStmt(FD->getBody());
+}
+
+void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ // Check if we should generate debug info for this function.
+ if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getDebugInfo();
+
+ FunctionArgList Args;
+
+ CurGD = GD;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isInstance()) {
+ // Create the implicit 'this' decl.
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
+ FD->getLocation(),
+ &getContext().Idents.get("this"),
+ MD->getThisType(getContext()));
+ Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
+
+ // Check if we need a VTT parameter as well.
+ if (CodeGenVTables::needsVTTParameter(GD)) {
+ // FIXME: The comment about using a fake decl above applies here too.
+ QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+ CXXVTTDecl =
+ ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
+ &getContext().Idents.get("vtt"), T);
+ Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
+ }
+ }
+ }
+
+ if (FD->getNumParams()) {
+ const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
+ assert(FProto && "Function def must have prototype!");
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+ Args.push_back(std::make_pair(FD->getParamDecl(i),
+ FProto->getArgType(i)));
+ }
+
+ SourceRange BodyRange;
+ if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
+
+ // Emit the standard function prologue.
+ StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());
+
+ // Generate the body of the function.
+ if (isa<CXXDestructorDecl>(FD))
+ EmitDestructorBody(Args);
+ else if (isa<CXXConstructorDecl>(FD))
+ EmitConstructorBody(Args);
+ else
+ EmitFunctionBody(Args);
+
+ // Emit the standard function epilogue.
+ FinishFunction(BodyRange.getEnd());
+
+ // Destroy the 'this' declaration.
+ if (CXXThisDecl)
+ CXXThisDecl->Destroy(getContext());
+
+ // Destroy the VTT declaration.
+ if (CXXVTTDecl)
+ CXXVTTDecl->Destroy(getContext());
+}
+
+/// ContainsLabel - Return true if the statement contains a label in it. If
+/// this statement is not executed normally, the absence of a label means that
+/// the code can simply be removed.
+bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
+ // Null statement, not a label!
+ if (S == 0) return false;
+
+ // If this is a label, we have to emit the code, consider something like:
+ // if (0) { ... foo: bar(); } goto foo;
+ if (isa<LabelStmt>(S))
+ return true;
+
+ // If this is a case/default statement, and we haven't seen a switch, we have
+ // to emit the code.
+ if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
+ return true;
+
+ // If this is a switch statement, we want to ignore cases below it.
+ if (isa<SwitchStmt>(S))
+ IgnoreCaseStmts = true;
+
+ // Scan subexpressions for verboten labels.
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I != E; ++I)
+ if (ContainsLabel(*I, IgnoreCaseStmts))
+ return true;
+
+ return false;
+}
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
+/// a constant, or if it does but contains a label, return 0. If it constant
+/// folds to 'true' and does not contain a label, return 1; if it constant
+/// folds to 'false' and does not contain a label, return -1.
+int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
+ // FIXME: Rename and handle conversion of other evaluatable things
+ // to bool.
+ Expr::EvalResult Result;
+ if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
+ Result.HasSideEffects)
+ return 0; // Not foldable, not integer or not fully evaluatable.
+
+ if (CodeGenFunction::ContainsLabel(Cond))
+ return 0; // Contains a label.
+
+ return Result.Val.getInt().getBoolValue() ? 1 : -1;
+}
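+// For example (editorial note; not part of the original commit):
+//
+//   ConstantFoldsToSimpleInteger("2 > 1")  -->  1   (folds to true)
+//   ConstantFoldsToSimpleInteger("0")      --> -1   (folds to false)
+//   ConstantFoldsToSimpleInteger("x > 0")  -->  0   (not foldable)
+//
+// A foldable condition that contains a label also yields 0, so the guarded
+// code is still emitted.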
+
+
+/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
+/// statement) to the specified blocks. Based on the condition, this might try
+/// to simplify the codegen of the conditional based on the branch.
+///
+void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
+ llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
+ return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
+
+ if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+ // Handle X && Y in a condition.
+ if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
+ // If we have "1 && X", simplify the code. "0 && X" would have constant
+ // folded if the case was simple enough.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
+ // br(1 && X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X && 1", simplify the code to use an uncond branch.
+ // "X && 0" would have been constant folded to 0.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
+ // br(X && 1) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is false, we
+ // want to jump to the FalseBlock.
+ llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
+ EmitBlock(LHSTrue);
+
+ // Any temporaries created here are conditional.
+ BeginConditionalBranch();
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ EndConditionalBranch();
+
+ return;
+ } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
+ // If we have "0 || X", simplify the code. "1 || X" would have constant
+ // folded if the case was simple enough.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
+ // br(0 || X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X || 0", simplify the code to use an uncond branch.
+ // "X || 1" would have been constant folded to 1.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
+ // br(X || 0) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is true, we
+ // want to jump to the TrueBlock.
+ llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
+ EmitBlock(LHSFalse);
+
+ // Any temporaries created here are conditional.
+ BeginConditionalBranch();
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ EndConditionalBranch();
+
+ return;
+ }
+ }
+
+ if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
+ // br(!x, t, f) -> br(x, f, t)
+ if (CondUOp->getOpcode() == UnaryOperator::LNot)
+ return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
+ }
+
+ if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
+ // Handle ?: operator.
+
+    // The GNU "?:" extension has a null LHS; it is not simplified here and
+    // falls through to the fully general case below.
+ if (CondOp->getLHS()) {
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+ EmitBlock(LHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ return;
+ }
+ }
+
+  // Emit the code for the fully general case.
+ llvm::Value *CondV = EvaluateExprAsBool(Cond);
+ Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
+}
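+// Editorial sketch (not part of the original commit) of the control flow
+// this produces for "if (a && b)", using the block names created above;
+// if.then/if.else stand in for the caller's TrueBlock/FalseBlock:
+//
+//   entry:          br(a) --> land.lhs.true or if.else
+//   land.lhs.true:  br(b) --> if.then       or if.else
+//
+// so a false LHS skips the RHS entirely, matching C's short-circuit rules.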
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ CGM.ErrorUnsupported(S, Type, OmitOnError);
+}
+
+void
+CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ if (CGM.getTypes().ContainsPointerToDataMember(Ty)) {
+ llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, llvm::Twine());
+ EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false);
+ return;
+ }
+
+
+ // Ignore empty classes in C++.
+ if (getContext().getLangOptions().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ return;
+ }
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+
+ // Get size and alignment info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+ // Don't bother emitting a zero-byte memset.
+ if (TypeInfo.first == 0)
+ return;
+
+ // FIXME: Handle variable sized types.
+ const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext,
+ LLVMPointerWidth);
+
+ Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtr), DestPtr,
+ llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
+ // TypeInfo.first describes size in bits.
+ llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ TypeInfo.second/8),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
+ 0));
+}
+
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
+ // Make sure that there is a block for the indirect goto.
+ if (IndirectBranch == 0)
+ GetIndirectGotoBlock();
+
+ llvm::BasicBlock *BB = getBasicBlockForLabel(L);
+
+ // Make sure the indirect branch includes all of the address-taken blocks.
+ IndirectBranch->addDestination(BB);
+ return llvm::BlockAddress::get(CurFn, BB);
+}
+
+llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
+ // If we already made the indirect branch for indirect goto, return its block.
+ if (IndirectBranch) return IndirectBranch->getParent();
+
+ CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // Create the PHI node that indirect gotos will add entries to.
+ llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
+
+ // Create the indirect branch instruction.
+ IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
+ return IndirectBranch->getParent();
+}
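+// Editorial sketch (not part of the original commit): for source such as
+//
+//   void *p = &&done; goto *p; done: ;
+//
+// GetAddrOfLabel registers "done" as a destination of the shared branch,
+// and the emitted IR is shaped roughly like (with @f a hypothetical
+// enclosing function):
+//
+//   indirectgoto:
+//     %indirect.goto.dest = phi i8* [ blockaddress(@f, %done), ... ]
+//     indirectbr i8* %indirect.goto.dest, [label %done, ...]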
+
+llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
+ llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+
+ assert(SizeEntry && "Did not emit size for type");
+ return SizeEntry;
+}
+
+llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
+ assert(Ty->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ EnsureInsertPoint();
+
+ if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
+ llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+
+ if (!SizeEntry) {
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+      // Get the element size.
+ QualType ElemTy = VAT->getElementType();
+ llvm::Value *ElemSize;
+ if (ElemTy->isVariableArrayType())
+ ElemSize = EmitVLASize(ElemTy);
+ else
+ ElemSize = llvm::ConstantInt::get(SizeTy,
+ getContext().getTypeSizeInChars(ElemTy).getQuantity());
+
+ llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
+ NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
+
+ SizeEntry = Builder.CreateMul(ElemSize, NumElements);
+ }
+
+ return SizeEntry;
+ }
+
+ if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ EmitVLASize(AT->getElementType());
+ return 0;
+ }
+
+ const PointerType *PT = Ty->getAs<PointerType>();
+ assert(PT && "unknown VM type!");
+ EmitVLASize(PT->getPointeeType());
+ return 0;
+}
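+// Editorial sketch (not part of the original commit): for a declaration
+// such as "int a[n][m]" the element size is computed recursively, so the
+// cached entries amount to
+//
+//   size(int[m])    = m * sizeof(int)
+//   size(int[n][m]) = n * size(int[m])
+//
+// with each multiply emitted once and memoized in VLASizeMap, keyed on the
+// size expression rather than the type.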
+
+llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
+ if (CGM.getContext().getBuiltinVaListType()->isArrayType()) {
+ return EmitScalarExpr(E);
+ }
+ return EmitLValue(E).getAddress();
+}
+
+void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
+ llvm::BasicBlock *CleanupExitBlock,
+ llvm::BasicBlock *PreviousInvokeDest,
+ bool EHOnly) {
+ CleanupEntries.push_back(CleanupEntry(CleanupEntryBlock, CleanupExitBlock,
+ PreviousInvokeDest, EHOnly));
+}
+
+void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize) {
+ assert(CleanupEntries.size() >= OldCleanupStackSize &&
+ "Cleanup stack mismatch!");
+
+ while (CleanupEntries.size() > OldCleanupStackSize)
+ EmitCleanupBlock();
+}
+
+CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
+ CleanupEntry &CE = CleanupEntries.back();
+
+ llvm::BasicBlock *CleanupEntryBlock = CE.CleanupEntryBlock;
+
+ std::vector<llvm::BasicBlock *> Blocks;
+ std::swap(Blocks, CE.Blocks);
+
+ std::vector<llvm::BranchInst *> BranchFixups;
+ std::swap(BranchFixups, CE.BranchFixups);
+
+ bool EHOnly = CE.EHOnly;
+
+ setInvokeDest(CE.PreviousInvokeDest);
+
+ CleanupEntries.pop_back();
+
+ // Check if any branch fixups pointed to the scope we just popped. If so,
+ // we can remove them.
+ for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
+ llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
+ BlockScopeMap::iterator I = BlockScopes.find(Dest);
+
+ if (I == BlockScopes.end())
+ continue;
+
+ assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");
+
+ if (I->second == CleanupEntries.size()) {
+ // We don't need to do this branch fixup.
+ BranchFixups[i] = BranchFixups.back();
+ BranchFixups.pop_back();
+ i--;
+ e--;
+ continue;
+ }
+ }
+
+ llvm::BasicBlock *SwitchBlock = CE.CleanupExitBlock;
+ llvm::BasicBlock *EndBlock = 0;
+ if (!BranchFixups.empty()) {
+ if (!SwitchBlock)
+ SwitchBlock = createBasicBlock("cleanup.switch");
+ EndBlock = createBasicBlock("cleanup.end");
+
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ Builder.SetInsertPoint(SwitchBlock);
+
+ llvm::Value *DestCodePtr
+ = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext),
+ "cleanup.dst");
+ llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+
+ // Create a switch instruction to determine where to jump next.
+ llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
+ BranchFixups.size());
+
+ // Restore the current basic block (if any)
+ if (CurBB) {
+ Builder.SetInsertPoint(CurBB);
+
+ // If we had a current basic block, we also need to emit an instruction
+ // to initialize the cleanup destination.
+ Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)),
+ DestCodePtr);
+ } else
+ Builder.ClearInsertionPoint();
+
+ for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
+ llvm::BranchInst *BI = BranchFixups[i];
+ llvm::BasicBlock *Dest = BI->getSuccessor(0);
+
+ // Fixup the branch instruction to point to the cleanup block.
+ BI->setSuccessor(0, CleanupEntryBlock);
+
+ if (CleanupEntries.empty()) {
+ llvm::ConstantInt *ID;
+
+ // Check if we already have a destination for this block.
+ if (Dest == SI->getDefaultDest())
+ ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ else {
+ ID = SI->findCaseDest(Dest);
+ if (!ID) {
+          // No case ID found; create a new unique one by using the number of
+          // switch successors.
+ ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ SI->getNumSuccessors());
+ SI->addCase(ID, Dest);
+ }
+ }
+
+ // Store the jump destination before the branch instruction.
+ new llvm::StoreInst(ID, DestCodePtr, BI);
+ } else {
+ // We need to jump through another cleanup block. Create a pad block
+ // with a branch instruction that jumps to the final destination and add
+ // it as a branch fixup to the current cleanup scope.
+
+ // Create the pad block.
+ llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
+
+ // Create a unique case ID.
+ llvm::ConstantInt *ID
+ = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ SI->getNumSuccessors());
+
+ // Store the jump destination before the branch instruction.
+ new llvm::StoreInst(ID, DestCodePtr, BI);
+
+ // Add it as the destination.
+ SI->addCase(ID, CleanupPad);
+
+ // Create the branch to the final destination.
+ llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
+ CleanupPad->getInstList().push_back(BI);
+
+ // And add it as a branch fixup.
+ CleanupEntries.back().BranchFixups.push_back(BI);
+ }
+ }
+ }
+
+ // Remove all blocks from the block scope map.
+ for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
+ assert(BlockScopes.count(Blocks[i]) &&
+ "Did not find block in scope map!");
+
+ BlockScopes.erase(Blocks[i]);
+ }
+
+ return CleanupBlockInfo(CleanupEntryBlock, SwitchBlock, EndBlock, EHOnly);
+}
+
+void CodeGenFunction::EmitCleanupBlock() {
+ CleanupBlockInfo Info = PopCleanupBlock();
+
+ if (Info.EHOnly) {
+ // FIXME: Add this to the exceptional edge
+ if (Info.CleanupBlock->getNumUses() == 0)
+ delete Info.CleanupBlock;
+ return;
+ }
+
+ // Scrub debug location info.
+ for (llvm::BasicBlock::iterator LBI = Info.CleanupBlock->begin(),
+ LBE = Info.CleanupBlock->end(); LBI != LBE; ++LBI)
+ Builder.SetInstDebugLocation(LBI);
+
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+ if (CurBB && !CurBB->getTerminator() &&
+ Info.CleanupBlock->getNumUses() == 0) {
+ CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
+ delete Info.CleanupBlock;
+ } else
+ EmitBlock(Info.CleanupBlock);
+
+ if (Info.SwitchBlock)
+ EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ EmitBlock(Info.EndBlock);
+}
+
+void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI) {
+ assert(!CleanupEntries.empty() &&
+ "Trying to add branch fixup without cleanup block!");
+
+ // FIXME: We could be more clever here and check if there's already a branch
+ // fixup for this destination and recycle it.
+ CleanupEntries.back().BranchFixups.push_back(BI);
+}
+
+void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest) {
+ if (!HaveInsertPoint())
+ return;
+
+ llvm::BranchInst* BI = Builder.CreateBr(Dest);
+
+ Builder.ClearInsertionPoint();
+
+ // The stack is empty, no need to do any cleanup.
+ if (CleanupEntries.empty())
+ return;
+
+ if (!Dest->getParent()) {
+ // We are trying to branch to a block that hasn't been inserted yet.
+ AddBranchFixup(BI);
+ return;
+ }
+
+ BlockScopeMap::iterator I = BlockScopes.find(Dest);
+ if (I == BlockScopes.end()) {
+ // We are trying to jump to a block that is outside of any cleanup scope.
+ AddBranchFixup(BI);
+ return;
+ }
+
+ assert(I->second < CleanupEntries.size() &&
+ "Trying to branch into cleanup region");
+
+ if (I->second == CleanupEntries.size() - 1) {
+ // We have a branch to a block in the same scope.
+ return;
+ }
+
+ AddBranchFixup(BI);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
new file mode 100644
index 0000000..ece275e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -0,0 +1,1390 @@
+//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-function state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
+#define CLANG_CODEGEN_CODEGENFUNCTION_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ValueHandle.h"
+#include "CodeGenModule.h"
+#include "CGBlocks.h"
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class BasicBlock;
+ class LLVMContext;
+ class MDNode;
+ class Module;
+ class SwitchInst;
+ class Twine;
+ class Value;
+}
+
+namespace clang {
+ class ASTContext;
+ class CXXDestructorDecl;
+ class CXXTryStmt;
+ class Decl;
+ class EnumConstantDecl;
+ class FunctionDecl;
+ class FunctionProtoType;
+ class LabelStmt;
+ class ObjCContainerDecl;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCPropertyImplDecl;
+ class TargetInfo;
+ class TargetCodeGenInfo;
+ class VarDecl;
+ class ObjCForCollectionStmt;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+
+namespace CodeGen {
+ class CodeGenTypes;
+ class CGDebugInfo;
+ class CGFunctionInfo;
+ class CGRecordLayout;
+ class CGBlockInfo;
+
+/// CodeGenFunction - This class organizes the per-function state that is used
+/// while generating LLVM code.
+class CodeGenFunction : public BlockFunction {
+ CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+public:
+ CodeGenModule &CGM; // Per-module state.
+ const TargetInfo &Target;
+
+ typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
+ CGBuilderTy Builder;
+
+ /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
+ /// This excludes BlockDecls.
+ const Decl *CurFuncDecl;
+ /// CurCodeDecl - This is the inner-most code context, which includes blocks.
+ const Decl *CurCodeDecl;
+ const CGFunctionInfo *CurFnInfo;
+ QualType FnRetTy;
+ llvm::Function *CurFn;
+
+ /// CurGD - The GlobalDecl for the current function being compiled.
+ GlobalDecl CurGD;
+
+ /// ReturnBlock - Unified return block.
+ llvm::BasicBlock *ReturnBlock;
+ /// ReturnValue - The temporary alloca to hold the return value. This is null
+ /// iff the function has no return value.
+ llvm::Value *ReturnValue;
+
+  /// AllocaInsertPt - This is an instruction in the entry block before which
+ /// we prefer to insert allocas.
+ llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+
+ const llvm::Type *LLVMIntTy;
+ uint32_t LLVMPointerWidth;
+
+ bool Exceptions;
+ bool CatchUndefined;
+
+ /// \brief A mapping from NRVO variables to the flags used to indicate
+ /// when the NRVO has been applied to this variable.
+ llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
+
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+
+ /// PushCleanupBlock - Push a new cleanup entry on the stack and set the
+ /// passed in block as the cleanup block.
+ void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
+ llvm::BasicBlock *CleanupExitBlock,
+ llvm::BasicBlock *PreviousInvokeDest,
+ bool EHOnly = false);
+ void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock) {
+ PushCleanupBlock(CleanupEntryBlock, 0, getInvokeDest(), false);
+ }
+
+ /// CleanupBlockInfo - A struct representing a popped cleanup block.
+ struct CleanupBlockInfo {
+    /// CleanupBlock - the cleanup entry block.
+ llvm::BasicBlock *CleanupBlock;
+
+ /// SwitchBlock - the block (if any) containing the switch instruction used
+ /// for jumping to the final destination.
+ llvm::BasicBlock *SwitchBlock;
+
+ /// EndBlock - the default destination for the switch instruction.
+ llvm::BasicBlock *EndBlock;
+
+ /// EHOnly - True iff this cleanup should only be performed on the
+ /// exceptional edge.
+ bool EHOnly;
+
+ CleanupBlockInfo(llvm::BasicBlock *cb, llvm::BasicBlock *sb,
+ llvm::BasicBlock *eb, bool ehonly = false)
+ : CleanupBlock(cb), SwitchBlock(sb), EndBlock(eb), EHOnly(ehonly) {}
+ };
+
+ /// EHCleanupBlock - RAII object that will create a cleanup block for the
+ /// exceptional edge and set the insert point to that block. When destroyed,
+ /// it creates the cleanup edge and sets the insert point to the previous
+ /// block.
+ class EHCleanupBlock {
+ CodeGenFunction& CGF;
+ llvm::BasicBlock *PreviousInsertionBlock;
+ llvm::BasicBlock *CleanupHandler;
+ llvm::BasicBlock *PreviousInvokeDest;
+ public:
+ EHCleanupBlock(CodeGenFunction &cgf)
+ : CGF(cgf),
+ PreviousInsertionBlock(CGF.Builder.GetInsertBlock()),
+ CleanupHandler(CGF.createBasicBlock("ehcleanup", CGF.CurFn)),
+ PreviousInvokeDest(CGF.getInvokeDest()) {
+ llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
+ CGF.Builder.SetInsertPoint(CleanupHandler);
+ CGF.setInvokeDest(TerminateHandler);
+ }
+ ~EHCleanupBlock();
+ };
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack, process all
+ /// branch fixups and return a block info struct with the switch block and end
+ /// block. This will also reset the invoke handler to the previous value
+ /// from when the cleanup block was created.
+ CleanupBlockInfo PopCleanupBlock();
+
+ /// DelayedCleanupBlock - RAII object that will create a cleanup block and set
+ /// the insert point to that block. When destructed, it sets the insert point
+ /// to the previous block and pushes a new cleanup entry on the stack.
+ class DelayedCleanupBlock {
+ CodeGenFunction& CGF;
+ llvm::BasicBlock *CurBB;
+ llvm::BasicBlock *CleanupEntryBB;
+ llvm::BasicBlock *CleanupExitBB;
+ llvm::BasicBlock *CurInvokeDest;
+ bool EHOnly;
+
+ public:
+ DelayedCleanupBlock(CodeGenFunction &cgf, bool ehonly = false)
+ : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()),
+ CleanupEntryBB(CGF.createBasicBlock("cleanup")),
+ CleanupExitBB(0),
+ CurInvokeDest(CGF.getInvokeDest()),
+ EHOnly(ehonly) {
+ CGF.Builder.SetInsertPoint(CleanupEntryBB);
+ }
+
+ llvm::BasicBlock *getCleanupExitBlock() {
+ if (!CleanupExitBB)
+ CleanupExitBB = CGF.createBasicBlock("cleanup.exit");
+ return CleanupExitBB;
+ }
+
+ ~DelayedCleanupBlock() {
+ CGF.PushCleanupBlock(CleanupEntryBB, CleanupExitBB, CurInvokeDest,
+ EHOnly);
+ // FIXME: This is silly, move this into the builder.
+ if (CurBB)
+ CGF.Builder.SetInsertPoint(CurBB);
+ else
+ CGF.Builder.ClearInsertionPoint();
+ }
+ };
+
+ /// \brief Enters a new scope for capturing cleanups, all of which will be
+ /// executed once the scope is exited.
+ class CleanupScope {
+ CodeGenFunction& CGF;
+ size_t CleanupStackDepth;
+ bool OldDidCallStackSave;
+ bool PerformCleanup;
+
+ CleanupScope(const CleanupScope &); // DO NOT IMPLEMENT
+ CleanupScope &operator=(const CleanupScope &); // DO NOT IMPLEMENT
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit CleanupScope(CodeGenFunction &CGF)
+ : CGF(CGF), PerformCleanup(true)
+ {
+ CleanupStackDepth = CGF.CleanupEntries.size();
+ OldDidCallStackSave = CGF.DidCallStackSave;
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~CleanupScope() {
+ if (PerformCleanup) {
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.EmitCleanupBlocks(CleanupStackDepth);
+ }
+ }
+
+ /// \brief Determine whether this scope requires any cleanups.
+ bool requiresCleanups() const {
+ return CGF.CleanupEntries.size() > CleanupStackDepth;
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ assert(PerformCleanup && "Already forced cleanup");
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.EmitCleanupBlocks(CleanupStackDepth);
+ PerformCleanup = false;
+ }
+ };
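+  // Editorial usage sketch (not part of the original commit): a typical
+  // caller brackets a lexical scope so that cleanups pushed while emitting
+  // it are flushed when the scope object dies:
+  //
+  //   {
+  //     CleanupScope Scope(*this);
+  //     EmitStmt(S);        // may push cleanup entries
+  //   }                     // ~CleanupScope emits the pending cleanups
+  //
+  // ForceCleanup() flushes them early instead, e.g. before branching out
+  // of the scope.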
+
+ /// CXXTemporariesCleanupScope - Enters a new scope for catching live
+ /// temporaries, all of which will be popped once the scope is exited.
+ class CXXTemporariesCleanupScope {
+ CodeGenFunction &CGF;
+ size_t NumLiveTemporaries;
+
+ // DO NOT IMPLEMENT
+ CXXTemporariesCleanupScope(const CXXTemporariesCleanupScope &);
+ CXXTemporariesCleanupScope &operator=(const CXXTemporariesCleanupScope &);
+
+ public:
+ explicit CXXTemporariesCleanupScope(CodeGenFunction &CGF)
+ : CGF(CGF), NumLiveTemporaries(CGF.LiveTemporaries.size()) { }
+
+ ~CXXTemporariesCleanupScope() {
+ while (CGF.LiveTemporaries.size() > NumLiveTemporaries)
+ CGF.PopCXXTemporary();
+ }
+ };
+
+
+ /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup
+ /// blocks that have been added.
+ void EmitCleanupBlocks(size_t OldCleanupStackSize);
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert block
+ /// through the cleanup handling code (if any) and then on to \arg Dest.
+ ///
+ /// FIXME: Maybe this should really be in EmitBranch? Don't we always want
+ /// this behavior for branches?
+ void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
+
+ /// BeginConditionalBranch - Should be called before a conditional part of an
+ /// expression is emitted. For example, before the RHS of the expression below
+ /// is emitted:
+ ///
+ /// b && f(T());
+ ///
+ /// This is used to make sure that any temporaries created in the conditional
+ /// branch are only destroyed if the branch is taken.
+ void BeginConditionalBranch() {
+ ++ConditionalBranchLevel;
+ }
+
+ /// EndConditionalBranch - Should be called after a conditional part of an
+ /// expression has been emitted.
+ void EndConditionalBranch() {
+ assert(ConditionalBranchLevel != 0 &&
+ "Conditional branch mismatch!");
+
+ --ConditionalBranchLevel;
+ }
+
+private:
+ CGDebugInfo *DebugInfo;
+
+ /// IndirectBranch - The first time an indirect goto is seen we create a block
+ /// with an indirect branch. Every time we see the address of a label taken,
+ /// we add the label to the indirect goto. Every subsequent indirect goto is
+ /// codegen'd as a jump to the IndirectBranch's basic block.
+ llvm::IndirectBrInst *IndirectBranch;
+
+ /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
+ /// decls.
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+
+ /// LabelMap - This keeps track of the LLVM basic block for each C label.
+ llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap;
+
+ // BreakContinueStack - This keeps track of where break and continue
+ // statements should jump to.
+ struct BreakContinue {
+ BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb)
+ : BreakBlock(bb), ContinueBlock(cb) {}
+
+ llvm::BasicBlock *BreakBlock;
+ llvm::BasicBlock *ContinueBlock;
+ };
+ llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
+
+  /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
+  /// if the current context is not in a switch.
+ llvm::SwitchInst *SwitchInsn;
+
+  /// CaseRangeBlock - This block holds the condition check for the last
+  /// case-statement range in the current switch instruction.
+ llvm::BasicBlock *CaseRangeBlock;
+
+ /// InvokeDest - This is the nearest exception target for calls
+ /// which can unwind, when exceptions are being used.
+ llvm::BasicBlock *InvokeDest;
+
+ // VLASizeMap - This keeps track of the associated size for each VLA type.
+ // We track this by the size expression rather than the type itself because
+ // in certain situations, like a const qualifier applied to an VLA typedef,
+ // multiple VLA types can share the same size expression.
+ // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+ // enter/leave scopes.
+ llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
+
+ /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+ /// calling llvm.stacksave for multiple VLAs in the same scope.
+ bool DidCallStackSave;
+
+ struct CleanupEntry {
+ /// CleanupEntryBlock - The block of code that does the actual cleanup.
+ llvm::BasicBlock *CleanupEntryBlock;
+
+ /// CleanupExitBlock - The cleanup exit block.
+ llvm::BasicBlock *CleanupExitBlock;
+
+ /// Blocks - Basic blocks that were emitted in the current cleanup scope.
+ std::vector<llvm::BasicBlock *> Blocks;
+
+ /// BranchFixups - Branch instructions to basic blocks that haven't been
+ /// inserted into the current function yet.
+ std::vector<llvm::BranchInst *> BranchFixups;
+
+ /// PreviousInvokeDest - The invoke handler from the start of the cleanup
+ /// region.
+ llvm::BasicBlock *PreviousInvokeDest;
+
+ /// EHOnly - Perform this only on the exceptional edge, not the main edge.
+ bool EHOnly;
+
+ explicit CleanupEntry(llvm::BasicBlock *CleanupEntryBlock,
+ llvm::BasicBlock *CleanupExitBlock,
+ llvm::BasicBlock *PreviousInvokeDest,
+ bool ehonly)
+ : CleanupEntryBlock(CleanupEntryBlock),
+ CleanupExitBlock(CleanupExitBlock),
+ PreviousInvokeDest(PreviousInvokeDest),
+ EHOnly(ehonly) {}
+ };
+
+ /// CleanupEntries - Stack of cleanup entries.
+ llvm::SmallVector<CleanupEntry, 8> CleanupEntries;
+
+ typedef llvm::DenseMap<llvm::BasicBlock*, size_t> BlockScopeMap;
+
+  /// BlockScopes - Map from each basic block to the cleanup scope it was
+  /// emitted in.
+ BlockScopeMap BlockScopes;
+
+ /// CXXThisDecl - When generating code for a C++ member function,
+ /// this will hold the implicit 'this' declaration.
+ ImplicitParamDecl *CXXThisDecl;
+ llvm::Value *CXXThisValue;
+
+ /// CXXVTTDecl - When generating code for a base object constructor or
+ /// base object destructor with virtual bases, this will hold the implicit
+ /// VTT parameter.
+ ImplicitParamDecl *CXXVTTDecl;
+ llvm::Value *CXXVTTValue;
+
+ /// CXXLiveTemporaryInfo - Holds information about a live C++ temporary.
+ struct CXXLiveTemporaryInfo {
+ /// Temporary - The live temporary.
+ const CXXTemporary *Temporary;
+
+ /// ThisPtr - The pointer to the temporary.
+ llvm::Value *ThisPtr;
+
+ /// DtorBlock - The destructor block.
+ llvm::BasicBlock *DtorBlock;
+
+ /// CondPtr - If this is a conditional temporary, this is the pointer to the
+ /// condition variable that states whether the destructor should be called
+ /// or not.
+ llvm::Value *CondPtr;
+
+ CXXLiveTemporaryInfo(const CXXTemporary *temporary,
+ llvm::Value *thisptr, llvm::BasicBlock *dtorblock,
+ llvm::Value *condptr)
+ : Temporary(temporary), ThisPtr(thisptr), DtorBlock(dtorblock),
+ CondPtr(condptr) { }
+ };
+
+ llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries;
+
+ /// ConditionalBranchLevel - Contains the nesting level of the current
+ /// conditional branch. This is used so that we know if a temporary should be
+ /// destroyed conditionally.
+ unsigned ConditionalBranchLevel;
+
+
+  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
+  /// type and the field number that contains the actual data.
+ llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
+ unsigned> > ByRefValueInfo;
+
+  /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
+  /// number that holds the value.
+ unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+
+ llvm::BasicBlock *TerminateHandler;
+ llvm::BasicBlock *TrapBB;
+
+ int UniqueAggrDestructorCount;
+public:
+ CodeGenFunction(CodeGenModule &cgm);
+
+ ASTContext &getContext() const;
+ CGDebugInfo *getDebugInfo() { return DebugInfo; }
+
+ llvm::BasicBlock *getInvokeDest() { return InvokeDest; }
+ void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; }
+
+ llvm::LLVMContext &getLLVMContext() { return VMContext; }
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C
+ //===--------------------------------------------------------------------===//
+
+ void GenerateObjCMethod(const ObjCMethodDecl *OMD);
+
+ void StartObjCMethod(const ObjCMethodDecl *MD,
+ const ObjCContainerDecl *CD);
+
+ /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
+ void GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD, bool ctor);
+
+ /// GenerateObjCSetter - Synthesize an Objective-C property setter function
+ /// for the given property.
+ void GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
+ bool IvarTypeWithAggrGCObjects(QualType Ty);
+
+ //===--------------------------------------------------------------------===//
+ // Block Bits
+ //===--------------------------------------------------------------------===//
+
+ llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
+ llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
+ bool BlockHasCopyDispose,
+ CharUnits Size,
+ const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Function *GenerateBlockFunction(const BlockExpr *BExpr,
+ CGBlockInfo &Info,
+ const Decl *OuterFuncDecl,
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm);
+
+ llvm::Value *LoadBlockStruct();
+
+ void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
+ void AllocateBlockDecl(const BlockDeclRefExpr *E);
+ llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
+ return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
+ }
+ llvm::Value *GetAddrOfBlockDecl(const ValueDecl *D, bool ByRef);
+ const llvm::Type *BuildByRefType(const ValueDecl *D);
+
+ void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
+ void StartFunction(GlobalDecl GD, QualType RetTy,
+ llvm::Function *Fn,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc);
+
+ void EmitConstructorBody(FunctionArgList &Args);
+ void EmitDestructorBody(FunctionArgList &Args);
+ void EmitFunctionBody(FunctionArgList &Args);
+
+ /// EmitReturnBlock - Emit the unified return block, trying to avoid its
+ /// emission when possible.
+ void EmitReturnBlock();
+
+ /// FinishFunction - Complete IR generation of the current function. It is
+ /// legal to call this function even if there is no current insertion point.
+ void FinishFunction(SourceLocation EndLoc=SourceLocation());
+
+ /// GenerateThunk - Generate a thunk for the given method.
+ void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
+
+ void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
+ FunctionArgList &Args);
+
+ /// InitializeVTablePointer - Initialize the vtable pointer of the given
+ /// subobject.
+ ///
+ void InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+ void InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases);
+
+ void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+
+
+  /// EmitDtorEpilogue - Emit all code that comes at the end of a class's
+ /// destructor. This is to call destructors on members and base classes in
+ /// reverse order of their construction.
+ void EmitDtorEpilogue(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type);
+
+ /// EmitFunctionProlog - Emit the target specific LLVM code to load the
+ /// arguments for the given function. This is also responsible for naming the
+ /// LLVM function arguments.
+ void EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args);
+
+ /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
+ /// given temporary.
+ void EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Value *ReturnValue);
+
+ /// EmitStartEHSpec - Emit the start of the exception spec.
+ void EmitStartEHSpec(const Decl *D);
+
+ /// EmitEndEHSpec - Emit the end of the exception spec.
+ void EmitEndEHSpec(const Decl *D);
+
+ /// getTerminateHandler - Return a handler that just calls terminate.
+ llvm::BasicBlock *getTerminateHandler();
+
+ const llvm::Type *ConvertTypeForMem(QualType T);
+ const llvm::Type *ConvertType(QualType T);
+ const llvm::Type *ConvertType(const TypeDecl *T) {
+ return ConvertType(getContext().getTypeDeclType(T));
+ }
+
+ /// LoadObjCSelf - Load the value of self. This function is only valid while
+ /// generating code for an Objective-C method.
+ llvm::Value *LoadObjCSelf();
+
+  /// TypeOfSelfObject - Return the type of the object that 'self' represents.
+ QualType TypeOfSelfObject();
+
+ /// hasAggregateLLVMType - Return true if the specified AST type will map into
+ /// an aggregate LLVM type or is void.
+ static bool hasAggregateLLVMType(QualType T);
+
+ /// createBasicBlock - Create an LLVM basic block.
+ llvm::BasicBlock *createBasicBlock(const char *Name="",
+ llvm::Function *Parent=0,
+ llvm::BasicBlock *InsertBefore=0) {
+#ifdef NDEBUG
+ return llvm::BasicBlock::Create(VMContext, "", Parent, InsertBefore);
+#else
+ return llvm::BasicBlock::Create(VMContext, Name, Parent, InsertBefore);
+#endif
+ }
+
+  /// getBasicBlockForLabel - Return the LLVM basic block that the specified
+ /// label maps to.
+ llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S);
+
+ /// SimplifyForwardingBlocks - If the given basic block is only a branch to
+ /// another basic block, simplify it. This assumes that no other code could
+ /// potentially reference the basic block.
+ void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
+
+ /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
+ /// adding a fall-through branch from the current insert block if
+ /// necessary. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// IsFinished - If true, indicates that the caller has finished emitting
+ /// branches to the given block and does not expect to emit code into it. This
+ /// means the block can be ignored if it is unreachable.
+ void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+
+ /// EmitBranch - Emit a branch to the specified basic block from the current
+ /// insert block, taking care to avoid creation of branches from dummy
+ /// blocks. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// This function clears the current insertion point. The caller should follow
+ /// calls to this function with calls to Emit*Block prior to generating new
+ /// code.
+ void EmitBranch(llvm::BasicBlock *Block);
+
+ /// HaveInsertPoint - True if an insertion point is defined. If not, this
+ /// indicates that the current code being emitted is unreachable.
+ bool HaveInsertPoint() const {
+ return Builder.GetInsertBlock() != 0;
+ }
+
+ /// EnsureInsertPoint - Ensure that an insertion point is defined so that
+ /// emitted IR has a place to go. Note that by definition, if this function
+ /// creates a block then that block is unreachable; callers may do better to
+ /// detect when no insertion point is defined and simply skip IR generation.
+ void EnsureInsertPoint() {
+ if (!HaveInsertPoint())
+ EmitBlock(createBasicBlock());
+ }
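+
+ // A minimal sketch (assumed usage, not from the source): statement
+ // emitters can either skip unreachable code or force a dead block:
+ //
+ //   if (!HaveInsertPoint())
+ //     return;            // preferred: skip IR for unreachable code
+ //   EnsureInsertPoint(); // otherwise: guarantee a block to emit into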
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ //===--------------------------------------------------------------------===//
+ // Helpers
+ //===--------------------------------------------------------------------===//
+
+ Qualifiers MakeQualifiers(QualType T) {
+ Qualifiers Quals = getContext().getCanonicalType(T).getQualifiers();
+ Quals.setObjCGCAttr(getContext().getObjCGCAttrKind(T));
+ return Quals;
+ }
+
+ /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block. The caller is responsible for setting an appropriate alignment on
+ /// the alloca.
+ llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
+ const llvm::Twine &Name = "tmp");
+
+ /// InitTempAlloca - Provide an initial value for the given alloca.
+ void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
+
+ /// CreateIRTemp - Create a temporary IR object of the given type, with
+ /// appropriate alignment. This routine should only be used when a temporary
+ /// value needs to be stored into an alloca (for example, to avoid explicit
+ /// PHI construction), but the type is the IR type, not the type appropriate
+ /// for storing in memory.
+ llvm::Value *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
+
+ /// CreateMemTemp - Create a temporary memory object of the given type, with
+ /// appropriate alignment.
+ llvm::Value *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
+
+ /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+ /// expression and compare the result against zero, returning an Int1Ty value.
+ llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+ /// EmitAnyExpr - Emit code to compute the specified expression which can have
+ /// any type. The result is returned as an RValue struct. If this is an
+ /// aggregate expression, the AggLoc/IsAggLocVolatile arguments indicate where
+ /// the result should be returned.
+ ///
+ /// \param IgnoreResult - True if the resulting value isn't used.
+ RValue EmitAnyExpr(const Expr *E, llvm::Value *AggLoc = 0,
+ bool IsAggLocVolatile = false, bool IgnoreResult = false,
+ bool IsInitializer = false);
+
+ // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
+ // or the value of the expression, depending on how va_list is defined.
+ llvm::Value *EmitVAListRef(const Expr *E);
+
+ /// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
+ /// always be accessible even if no aggregate location is provided.
+ RValue EmitAnyExprToTemp(const Expr *E, bool IsAggLocVolatile = false,
+ bool IsInitializer = false);
+
+ /// EmitAnyExprToMem - Emit the code necessary to evaluate an
+ /// arbitrary expression into the given memory location.
+ void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
+ bool IsLocationVolatile = false,
+ bool IsInitializer = false);
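+
+ // Plausible caller sketch (illustrative only): materialize an arbitrary
+ // expression into a fresh temporary:
+ //
+ //   llvm::Value *Tmp = CreateMemTemp(E->getType(), "init.tmp");
+ //   EmitAnyExprToMem(E, Tmp, /*IsLocationVolatile=*/false,
+ //                    /*IsInitializer=*/true);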
+
+ /// EmitAggregateCopy - Emit an aggregate copy.
+ ///
+ /// \param isVolatile - True iff either the source or the destination is
+ /// volatile.
+ void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType EltTy, bool isVolatile=false);
+
+ /// StartBlock - Start new block named N. If insert block is a dummy block
+ /// then reuse it.
+ void StartBlock(const char *N);
+
+ /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
+ llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD);
+
+ /// GetAddrOfLocalVar - Return the address of a local variable.
+ llvm::Value *GetAddrOfLocalVar(const VarDecl *VD);
+
+ /// getAccessedFieldNo - Given an encoded value and a result number, return
+ /// the input field number being accessed.
+ static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+
+ llvm::BlockAddress *GetAddrOfLabel(const LabelStmt *L);
+ llvm::BasicBlock *GetIndirectGotoBlock();
+
+ /// EmitNullInitialization - Generate code to set a value of the given type to
+ /// null. If the type contains data member pointers, they will be initialized
+ /// to -1 in accordance with the Itanium C++ ABI.
+ void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
+
+ // EmitVAArg - Generate code to get an argument from the passed in pointer
+ // and update it accordingly. The return value is a pointer to the argument.
+ // FIXME: We should be able to get rid of this method and use the va_arg
+ // instruction in LLVM instead once it works well enough.
+ llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
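+
+ // Sketch of a hypothetical lowering for a VAArgExpr 'E' using the two
+ // helpers above (not lifted from the implementation):
+ //
+ //   llvm::Value *VAList = EmitVAListRef(E->getSubExpr());
+ //   llvm::Value *ArgPtr = EmitVAArg(VAList, E->getType());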
+
+ /// EmitVLASize - Generate code for any VLA size expressions that might occur
+ /// in a variably modified type. If Ty is a VLA, this returns the value that
+ /// corresponds to the size in bytes of the VLA type; otherwise it returns 0.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ llvm::Value *EmitVLASize(QualType Ty);
+
+ // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
+ // of a variable length array type.
+ llvm::Value *GetVLASize(const VariableArrayType *);
+
+ /// LoadCXXThis - Load the value of 'this'. This function is only valid while
+ /// generating code for a C++ member function.
+ llvm::Value *LoadCXXThis() {
+ assert(CXXThisValue && "no 'this' value for this function");
+ return CXXThisValue;
+ }
+
+ /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors that
+ /// have virtual bases.
+ llvm::Value *LoadCXXVTT() {
+ assert(CXXVTTValue && "no VTT value for this function");
+ return CXXVTTValue;
+ }
+
+ /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
+ /// complete class to the given direct base.
+ llvm::Value *
+ GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual);
+
+ /// GetAddressOfBaseClass - This function adds the necessary delta to the
+ /// given derived-class pointer and returns the address of the base class.
+ llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXBaseSpecifierArray &BasePath,
+ bool NullCheckValue);
+
+ llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXBaseSpecifierArray &BasePath,
+ bool NullCheckValue);
+
+ llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl);
+
+ void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args);
+ void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This);
+
+ void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *This);
+
+ llvm::Constant *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This);
+
+ void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+ bool ForVirtualBase, llvm::Value *This);
+
+ void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
+ void PopCXXTemporary();
+
+ llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
+ void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
+
+ void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
+ QualType DeleteTy);
+
+ llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
+ llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+
+ void EmitCheck(llvm::Value *, unsigned Size);
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ //===--------------------------------------------------------------------===//
+ // Declaration Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitDecl - Emit a declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitDecl(const Decl &D);
+
+ /// EmitBlockVarDecl - Emit a block variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitBlockVarDecl(const VarDecl &D);
+
+ /// EmitLocalBlockVarDecl - Emit a local block variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitLocalBlockVarDecl(const VarDecl &D);
+
+ void EmitStaticBlockVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
+ void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
+
+ //===--------------------------------------------------------------------===//
+ // Statement Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
+ void EmitStopPoint(const Stmt *S);
+
+ /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
+ /// this function even if there is no current insertion point.
+ ///
+ /// This function may clear the current insertion point; callers should use
+ /// EnsureInsertPoint if they wish to subsequently generate code without first
+ /// calling EmitBlock, EmitBranch, or EmitStmt.
+ void EmitStmt(const Stmt *S);
+
+ /// EmitSimpleStmt - Try to emit a "simple" statement which does not
+ /// necessarily require an insertion point or debug information; typically
+ /// because the statement amounts to a jump or a container of other
+ /// statements.
+ ///
+ /// \return True if the statement was handled.
+ bool EmitSimpleStmt(const Stmt *S);
+
+ RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ llvm::Value *AggLoc = 0, bool isAggVol = false);
+
+ /// EmitLabel - Emit the block for the given label. It is legal to call this
+ /// function even if there is no current insertion point.
+ void EmitLabel(const LabelStmt &S); // helper for EmitLabelStmt.
+
+ void EmitLabelStmt(const LabelStmt &S);
+ void EmitGotoStmt(const GotoStmt &S);
+ void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
+ void EmitIfStmt(const IfStmt &S);
+ void EmitWhileStmt(const WhileStmt &S);
+ void EmitDoStmt(const DoStmt &S);
+ void EmitForStmt(const ForStmt &S);
+ void EmitReturnStmt(const ReturnStmt &S);
+ void EmitDeclStmt(const DeclStmt &S);
+ void EmitBreakStmt(const BreakStmt &S);
+ void EmitContinueStmt(const ContinueStmt &S);
+ void EmitSwitchStmt(const SwitchStmt &S);
+ void EmitDefaultStmt(const DefaultStmt &S);
+ void EmitCaseStmt(const CaseStmt &S);
+ void EmitCaseStmtRange(const CaseStmt &S);
+ void EmitAsmStmt(const AsmStmt &S);
+
+ void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
+ void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
+ void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
+ void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+
+ llvm::Constant *getUnwindResumeOrRethrowFn();
+ struct CXXTryStmtInfo {
+ llvm::BasicBlock *SavedLandingPad;
+ llvm::BasicBlock *HandlerBlock;
+ llvm::BasicBlock *FinallyBlock;
+ };
+ CXXTryStmtInfo EnterCXXTryStmt(const CXXTryStmt &S);
+ void ExitCXXTryStmt(const CXXTryStmt &S, CXXTryStmtInfo Info);
+
+ void EmitCXXTryStmt(const CXXTryStmt &S);
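+
+ // Pairing sketch inferred from the declarations above (not copied from
+ // the implementation): the Enter/Exit helpers bracket the try body so
+ // the saved landing pad can be restored afterwards:
+ //
+ //   CXXTryStmtInfo Info = EnterCXXTryStmt(S);
+ //   EmitStmt(S.getTryBlock());
+ //   ExitCXXTryStmt(S, Info);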
+
+ //===--------------------------------------------------------------------===//
+ // LValue Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
+ RValue GetUndefRValue(QualType Ty);
+
+ /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
+ /// and issue an ErrorUnsupported style diagnostic (using the
+ /// provided Name).
+ RValue EmitUnsupportedRValue(const Expr *E,
+ const char *Name);
+
+ /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
+ /// an ErrorUnsupported style diagnostic (using the provided Name).
+ LValue EmitUnsupportedLValue(const Expr *E,
+ const char *Name);
+
+ /// EmitLValue - Emit code to compute a designator that specifies the location
+ /// of the expression.
+ ///
+ /// This can return one of two things: a simple address or a bitfield
+ /// reference. In either case, the LLVM Value* in the LValue structure is
+ /// guaranteed to be an LLVM pointer type.
+ ///
+ /// If this returns a bitfield reference, nothing about the pointee type of
+ /// the LLVM value is known: For example, it may not be a pointer to an
+ /// integer.
+ ///
+ /// If this returns a normal address, and if the lvalue's C type is fixed
+ /// size, this method guarantees that the returned pointer type will point to
+ /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
+ /// variable length type, this is not possible.
+ ///
+ LValue EmitLValue(const Expr *E);
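+
+ // A minimal usage sketch (illustrative): load an expression through its
+ // lvalue designator:
+ //
+ //   LValue LV = EmitLValue(E);
+ //   RValue RV = EmitLoadOfLValue(LV, E->getType());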
+
+ /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
+ /// checking code to guard against undefined behavior. This is only
+ /// suitable when we know that the address will be used to access the
+ /// object.
+ LValue EmitCheckedLValue(const Expr *E);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ QualType Ty);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the LLVM value representation
+ /// to the memory representation.
+ void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, QualType Ty);
+
+ /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+ /// this method emits the address of the lvalue, then loads the result as an
+ /// rvalue, returning the rvalue.
+ RValue EmitLoadOfLValue(LValue V, QualType LVType);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
+ RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfPropertyRefLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfKVCRefLValue(LValue LV, QualType ExprType);
+
+
+ /// EmitStoreThroughLValue - Store the specified rvalue into the specified
+ /// lvalue, where both are guaranteed to have the same type, and that type
+ /// is 'Ty'.
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
+ QualType Ty);
+ void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughKVCRefLValue(RValue Src, LValue Dst, QualType Ty);
+
+ /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same
+ /// constraints as EmitStoreThroughLValue.
+ ///
+ /// \param Result [out] - If non-null, this will be set to a Value* for the
+ /// bit-field contents after the store, appropriate for use as the result of
+ /// an assignment to the bit-field.
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
+ llvm::Value **Result=0);
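+
+ // Hypothetical sketch of the Result out-parameter: an assignment whose
+ // LHS is a bit-field can recover the value actually stored:
+ //
+ //   llvm::Value *Result = 0;
+ //   EmitStoreThroughBitfieldLValue(Src, Dst, Ty, &Result);
+ //   // Result now holds the (possibly truncated) stored value.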
+
+ // Note: only available for agg return types
+ LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+ LValue EmitCompoundAssignOperatorLValue(const CompoundAssignOperator *E);
+ // Note: only available for agg return types
+ LValue EmitCallExprLValue(const CallExpr *E);
+ // Note: only available for agg return types
+ LValue EmitVAArgExprLValue(const VAArgExpr *E);
+ LValue EmitDeclRefLValue(const DeclRefExpr *E);
+ LValue EmitStringLiteralLValue(const StringLiteral *E);
+ LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
+ LValue EmitPredefinedFunctionName(unsigned Type);
+ LValue EmitPredefinedLValue(const PredefinedExpr *E);
+ LValue EmitUnaryOpLValue(const UnaryOperator *E);
+ LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
+ LValue EmitMemberExpr(const MemberExpr *E);
+ LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
+ LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
+ LValue EmitCastLValue(const CastExpr *E);
+ LValue EmitNullInitializationLValue(const CXXZeroInitValueExpr *E);
+
+ llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ LValue EmitLValueForAnonRecordField(llvm::Value* Base,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers);
+ LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
+ /// if the Field is a reference, this will return the address of the reference
+ /// and not the address of the value stored in the reference.
+ LValue EmitLValueForFieldInitialization(llvm::Value* Base,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value* Base, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
+
+ LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
+ LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
+ LValue EmitCXXExprWithTemporariesLValue(const CXXExprWithTemporaries *E);
+ LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
+
+ LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
+ LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
+ LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
+ LValue EmitObjCKVCRefLValue(const ObjCImplicitSetterGetterRefExpr *E);
+ LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
+ LValue EmitStmtExprLValue(const StmtExpr *E);
+ LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+
+ //===--------------------------------------------------------------------===//
+ // Scalar Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitCall - Generate a call of the given function, expecting the given
+ /// result type, and using the given argument list which specifies both the
+ /// LLVM arguments and the types they were derived from.
+ ///
+ /// \param TargetDecl - If given, the decl of the function in a direct call;
+ /// used to set attributes on the call (noreturn, etc.).
+ RValue EmitCall(const CGFunctionInfo &FnInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &Args,
+ const Decl *TargetDecl = 0,
+ llvm::Instruction **callOrInvoke = 0);
+
+ RValue EmitCall(QualType FnType, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl = 0);
+ RValue EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue = ReturnValueSlot());
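+
+ // Assumed usage sketch (RValue's isScalar()/getScalarVal() accessors come
+ // from CGValue.h, not shown here):
+ //
+ //   RValue RV = EmitCallExpr(CE, ReturnValueSlot());
+ //   llvm::Value *V = RV.isScalar() ? RV.getScalarVal() : 0;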
+
+ llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+ const llvm::Type *Ty);
+ llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *&This, const llvm::Type *Ty);
+
+ RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+ RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+ RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue);
+
+
+ RValue EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E);
+
+ RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
+
+ /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
+ /// is unhandled by the current target.
+ llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
+ llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
+ RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+ RValue EmitObjCPropertyGet(const Expr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+ RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S,
+ ReturnValueSlot Return = ReturnValueSlot());
+ void EmitObjCPropertySet(const Expr *E, RValue Src);
+ void EmitObjCSuperPropertySet(const Expr *E, const Selector &S, RValue Src);
+
+
+ /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
+ /// expression. Will emit a temporary variable if E is not an LValue.
+ RValue EmitReferenceBindingToExpr(const Expr* E, bool IsInitializer = false);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ // Expressions are broken into three classes: scalar, complex, aggregate.
+
+ /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
+ /// scalar type, returning the result.
+ llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign = false);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
+ QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
+ QualType DstTy);
+
+
+ /// EmitAggExpr - Emit the computation of the specified expression of
+ /// aggregate type. The result is computed into DestPtr. Note that if
+ /// DestPtr is null, the value of the aggregate expression is not needed.
+ void EmitAggExpr(const Expr *E, llvm::Value *DestPtr, bool VolatileDest,
+ bool IgnoreResult = false, bool IsInitializer = false,
+ bool RequiresGCollection = false);
+
+ /// EmitAggExprToLValue - Emit the computation of the specified expression of
+ /// aggregate type into a temporary LValue.
+ LValue EmitAggExprToLValue(const Expr *E);
+
+ /// EmitGCMemmoveCollectable - Emit a copy of an aggregate containing object
+ /// pointers using the special GC-aware runtime copy function.
+ void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType Ty);
+
+ /// EmitComplexExpr - Emit the computation of the specified expression of
+ /// complex type, returning the result.
+ ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
+ bool IgnoreImag = false,
+ bool IgnoreRealAssign = false,
+ bool IgnoreImagAssign = false);
+
+ /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+ /// of complex type, storing into the specified Value*.
+ void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+
+ /// StoreComplexToAddr - Store a complex number into the specified address.
+ void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+ /// LoadComplexFromAddr - Load a complex number from the specified address.
+ ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
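+
+ // Round-trip sketch (illustrative): evaluate a _Complex expression,
+ // spill it to memory, and reload it:
+ //
+ //   ComplexPairTy CV = EmitComplexExpr(E);
+ //   StoreComplexToAddr(CV, Addr, /*DestIsVolatile=*/false);
+ //   ComplexPairTy R = LoadComplexFromAddr(Addr, /*SrcIsVolatile=*/false);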
+
+ /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global for a
+ /// static block var decl.
+ llvm::GlobalVariable *CreateStaticBlockVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+ /// global variable that has already been created for it. If the initializer
+ /// has a different type than GV does, this may free GV and return a different
+ /// one. Otherwise it just returns GV.
+ llvm::GlobalVariable *
+ AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+
+ /// EmitStaticCXXBlockVarDeclInit - Create the initializer for a C++ runtime
+ /// initialized static block var decl.
+ void EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+ /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
+ /// variable with global storage.
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr);
+
+ /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
+ /// with the C++ runtime so that its destructor will be called at exit.
+ void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
+ llvm::Constant *DeclPtr);
+
+ /// GenerateCXXGlobalInitFunc - Generates code for initializing global
+ /// variables.
+ void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ llvm::Constant **Decls,
+ unsigned NumDecls);
+
+ /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
+ /// variables.
+ void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::Constant*,
+ llvm::Constant*> > &DtorsAndObjects);
+
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D);
+
+ void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
+
+ RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+ llvm::Value *AggLoc = 0,
+ bool IsAggLocVolatile = false,
+ bool IsInitializer = false);
+
+ void EmitCXXThrowExpr(const CXXThrowExpr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Internal Helpers
+ //===--------------------------------------------------------------------===//
+
+ /// ContainsLabel - Return true if the statement contains a label in it. If
+ /// this statement is not executed normally, the absence of a label means
+ /// that we can simply remove the code.
+ static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return 0. If it
+ /// constant folds to 'true' and does not contain a label, return 1; if it
+ /// constant folds to 'false' and does not contain a label, return -1.
+ int ConstantFoldsToSimpleInteger(const Expr *Cond);
+
+ /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
+ /// if statement) to the specified blocks. Based on the condition, this might
+ /// try to simplify the codegen of the conditional based on the branch.
+ void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock);
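+
+ // Folding sketch (an assumption about typical use, not lifted from the
+ // implementation): callers can skip emitting a dead arm entirely:
+ //
+ //   if (int C = ConstantFoldsToSimpleInteger(Cond))
+ //     EmitBranch(C == 1 ? TrueBlock : FalseBlock); // fold the branch
+ //   else
+ //     EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock);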
+
+ /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
+ /// generate a branch around the created basic block as necessary.
+ llvm::BasicBlock* getTrapBB();
+
+ /// EmitCallArg - Emit a single call argument.
+ RValue EmitCallArg(const Expr *E, QualType ArgType);
+
+ /// EmitDelegateCallArg - We are performing a delegate call; that
+ /// is, the current function is delegating to another one. Produce
+ /// an r-value suitable for passing the given parameter.
+ RValue EmitDelegateCallArg(const VarDecl *Param);
+
+private:
+
+ void EmitReturnOfRValue(RValue RV, QualType Ty);
+
+ /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
+ /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
+ ///
+ /// \param AI - The first function argument of the expansion.
+ /// \return The argument following the last expanded function
+ /// argument.
+ llvm::Function::arg_iterator
+ ExpandTypeFromArgs(QualType Ty, LValue Dst,
+ llvm::Function::arg_iterator AI);
+
+ /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
+ /// Ty, into individual arguments on the provided vector \arg Args. See
+ /// ABIArgInfo::Expand.
+ void ExpandTypeToArgs(QualType Ty, RValue Src,
+ llvm::SmallVector<llvm::Value*, 16> &Args);
+
+ llvm::Value* EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr, std::string &ConstraintStr);
+
+ /// EmitCleanupBlock - Emits a single cleanup block.
+ void EmitCleanupBlock();
+
+ /// AddBranchFixup - Adds a branch instruction to the list of fixups for the
+ /// current cleanup scope.
+ void AddBranchFixup(llvm::BranchInst *BI);
+
+ /// EmitCallArgs - Emit call arguments for a function.
+ /// The CallArgTypeInfo parameter is used for iterating over the known
+ /// argument types of the function being called.
+ template<typename T>
+ void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+
+ // First, use the argument types that the type info knows about
+ if (CallArgTypeInfo) {
+ for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
+ E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ QualType ArgType = *I;
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ }
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg) {
+ QualType ArgType = Arg->getType();
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+ }
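+
+ // Illustrative instantiation, assuming FunctionProtoType's
+ // arg_type_begin()/arg_type_end() iterators satisfy the template's
+ // requirements:
+ //
+ //   const FunctionProtoType *FPT = FnType->getAs<FunctionProtoType>();
+ //   CallArgList Args;
+ //   EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());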
+
+ const TargetCodeGenInfo &getTargetHooks() const {
+ return CGM.getTargetCodeGenInfo();
+ }
+};
+
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
new file mode 100644
index 0000000..103024c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -0,0 +1,2006 @@
+//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-module state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGCall.h"
+#include "CGObjCRuntime.h"
+#include "Mangle.h"
+#include "TargetInfo.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Module.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
+ llvm::Module &M, const llvm::TargetData &TD,
+ Diagnostic &diags)
+ : BlockModule(C, M, TD, Types, *this), Context(C),
+ Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
+ TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
+ Types(C, M, TD, getTargetCodeGenInfo().getABIInfo()),
+ VTables(*this), Runtime(0), ABI(0),
+ CFConstantStringClassRef(0),
+ NSConstantStringClassRef(0),
+ VMContext(M.getContext()) {
+
+ if (!Features.ObjC1)
+ Runtime = 0;
+ else if (!Features.NeXTRuntime)
+ Runtime = CreateGNUObjCRuntime(*this);
+ else if (Features.ObjCNonFragileABI)
+ Runtime = CreateMacNonFragileABIObjCRuntime(*this);
+ else
+ Runtime = CreateMacObjCRuntime(*this);
+
+ if (!Features.CPlusPlus)
+ ABI = 0;
+ else createCXXABI();
+
+ // If debug info generation is enabled, create the CGDebugInfo object.
+ DebugInfo = CodeGenOpts.DebugInfo ? new CGDebugInfo(*this) : 0;
+}
+
+CodeGenModule::~CodeGenModule() {
+ delete Runtime;
+ delete ABI;
+ delete DebugInfo;
+}
+
+void CodeGenModule::createObjCRuntime() {
+ if (!Features.NeXTRuntime)
+ Runtime = CreateGNUObjCRuntime(*this);
+ else if (Features.ObjCNonFragileABI)
+ Runtime = CreateMacNonFragileABIObjCRuntime(*this);
+ else
+ Runtime = CreateMacObjCRuntime(*this);
+}
+
+void CodeGenModule::createCXXABI() {
+ // For now, just create an Itanium ABI.
+ ABI = CreateItaniumCXXABI(*this);
+}
+
+void CodeGenModule::Release() {
+ EmitDeferred();
+ EmitCXXGlobalInitFunc();
+ EmitCXXGlobalDtorFunc();
+ if (Runtime)
+ if (llvm::Function *ObjCInitFunction = Runtime->ModuleInitFunction())
+ AddGlobalCtor(ObjCInitFunction);
+ EmitCtorList(GlobalCtors, "llvm.global_ctors");
+ EmitCtorList(GlobalDtors, "llvm.global_dtors");
+ EmitAnnotations();
+ EmitLLVMUsed();
+}
+
+bool CodeGenModule::isTargetDarwin() const {
+ return getContext().Target.getTriple().getOS() == llvm::Triple::Darwin;
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
+ << Msg << S->getSourceRange();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified decl yet.
+void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
+}
+
+LangOptions::VisibilityMode
+CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (VD->getStorageClass() == VarDecl::PrivateExtern)
+ return LangOptions::Hidden;
+
+ if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) {
+ switch (attr->getVisibility()) {
+ default: assert(0 && "Unknown visibility!");
+ case VisibilityAttr::DefaultVisibility:
+ return LangOptions::Default;
+ case VisibilityAttr::HiddenVisibility:
+ return LangOptions::Hidden;
+ case VisibilityAttr::ProtectedVisibility:
+ return LangOptions::Protected;
+ }
+ }
+
+ // This decl should have the same visibility as its parent.
+ if (const DeclContext *DC = D->getDeclContext())
+ return getDeclVisibilityMode(cast<Decl>(DC));
+
+ return getLangOptions().getVisibilityMode();
+}
+
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+ const Decl *D) const {
+ // Internal definitions always have default visibility.
+ if (GV->hasLocalLinkage()) {
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ return;
+ }
+
+ switch (getDeclVisibilityMode(D)) {
+ default: assert(0 && "Unknown visibility!");
+ case LangOptions::Default:
+ return GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ case LangOptions::Hidden:
+ return GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ case LangOptions::Protected:
+ return GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ }
+}
+
+void CodeGenModule::getMangledName(MangleBuffer &Buffer, GlobalDecl GD) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
+
+ if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
+ return getMangledCXXCtorName(Buffer, D, GD.getCtorType());
+ if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
+ return getMangledCXXDtorName(Buffer, D, GD.getDtorType());
+
+ return getMangledName(Buffer, ND);
+}
+
+/// \brief Retrieves the mangled name for the given declaration.
+///
+/// If the given declaration requires a mangled name, fills Buffer with
+/// the mangled name; otherwise, fills it with the unmangled name.
+///
+void CodeGenModule::getMangledName(MangleBuffer &Buffer,
+ const NamedDecl *ND) {
+ if (!getMangleContext().shouldMangleDeclName(ND)) {
+ assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
+ Buffer.setString(ND->getNameAsCString());
+ return;
+ }
+
+ getMangleContext().mangleName(ND, Buffer.getBuffer());
+}
+
+llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) {
+ return getModule().getNamedValue(Name);
+}
+
+/// AddGlobalCtor - Add a function to the list that will be called before
+/// main() runs.
+void CodeGenModule::AddGlobalCtor(llvm::Function * Ctor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalCtors.push_back(std::make_pair(Ctor, Priority));
+}
+
+/// AddGlobalDtor - Add a function to the list that will be called
+/// when the module is unloaded.
+void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalDtors.push_back(std::make_pair(Dtor, Priority));
+}
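+
+// Usage sketch (illustrative): a function registered here is emitted into
+// the llvm.global_ctors array when Release() calls EmitCtorList, e.g.
+//
+//   AddGlobalCtor(InitFn); // InitFn: a hypothetical llvm::Function*
+//   // Release() later emits an { i32, void ()* } entry for it.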
+
+void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+ // Ctor function type is void()*.
+ llvm::FunctionType* CtorFTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ std::vector<const llvm::Type*>(),
+ false);
+ llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+
+ // Get the type of a ctor entry, { i32, void ()* }.
+ llvm::StructType* CtorStructTy =
+ llvm::StructType::get(VMContext, llvm::Type::getInt32Ty(VMContext),
+ llvm::PointerType::getUnqual(CtorFTy), NULL);
+
+ // Construct the array of ctor/dtor entries.
+ std::vector<llvm::Constant*> Ctors;
+ for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+ std::vector<llvm::Constant*> S;
+ S.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ I->second, false));
+ S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
+ Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+ }
+
+ if (!Ctors.empty()) {
+ llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
+ new llvm::GlobalVariable(TheModule, AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Ctors),
+ GlobalName);
+ }
+}
+
+void CodeGenModule::EmitAnnotations() {
+ if (Annotations.empty())
+ return;
+
+ // Create a new global variable for the ConstantStruct in the Module.
+ llvm::Constant *Array =
+ llvm::ConstantArray::get(llvm::ArrayType::get(Annotations[0]->getType(),
+ Annotations.size()),
+ Annotations);
+ llvm::GlobalValue *gv =
+ new llvm::GlobalVariable(TheModule, Array->getType(), false,
+ llvm::GlobalValue::AppendingLinkage, Array,
+ "llvm.global.annotations");
+ gv->setSection("llvm.metadata");
+}
+
+static CodeGenModule::GVALinkage
+GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
+ const LangOptions &Features) {
+ CodeGenModule::GVALinkage External = CodeGenModule::GVA_StrongExternal;
+
+ Linkage L = FD->getLinkage();
+ if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
+ FD->getType()->getLinkage() == UniqueExternalLinkage)
+ L = UniqueExternalLinkage;
+
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return CodeGenModule::GVA_Internal;
+
+ case ExternalLinkage:
+ switch (FD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ External = CodeGenModule::GVA_StrongExternal;
+ break;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return CodeGenModule::GVA_ExplicitTemplateInstantiation;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ImplicitInstantiation:
+ External = CodeGenModule::GVA_TemplateInstantiation;
+ break;
+ }
+ }
+
+ if (!FD->isInlined())
+ return External;
+
+ if (!Features.CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
+ // GNU or C99 inline semantics. Determine whether this symbol should be
+ // externally visible.
+ if (FD->isInlineDefinitionExternallyVisible())
+ return External;
+
+ // C99 inline semantics, where the symbol is not externally visible.
+ return CodeGenModule::GVA_C99Inline;
+ }
+
+ // C++0x [temp.explicit]p9:
+ // [ Note: The intent is that an inline function that is the subject of
+ // an explicit instantiation declaration will still be implicitly
+ // instantiated when used so that the body can be considered for
+ // inlining, but that no out-of-line copy of the inline function would be
+ // generated in the translation unit. -- end note ]
+ if (FD->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return CodeGenModule::GVA_C99Inline;
+
+ return CodeGenModule::GVA_CXXInline;
+}
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
+ GVALinkage Linkage = GetLinkageForFunction(getContext(), D, Features);
+
+ if (Linkage == GVA_Internal) {
+ return llvm::Function::InternalLinkage;
+ } else if (D->hasAttr<DLLExportAttr>()) {
+ return llvm::Function::DLLExportLinkage;
+ } else if (D->hasAttr<WeakAttr>()) {
+ return llvm::Function::WeakAnyLinkage;
+ } else if (Linkage == GVA_C99Inline) {
+ // In C99 mode, 'inline' functions are guaranteed to have a strong
+ // definition somewhere else, so we can use available_externally linkage.
+ return llvm::Function::AvailableExternallyLinkage;
+ } else if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation) {
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ return llvm::Function::LinkOnceODRLinkage;
+ } else if (Linkage == GVA_ExplicitTemplateInstantiation) {
+ // An explicit instantiation of a template has weak linkage, since
+ // explicit instantiations can occur in multiple translation units
+ // and must all be equivalent. However, we are not allowed to
+ // throw away these explicit instantiations.
+ return llvm::Function::WeakODRLinkage;
+ } else {
+ assert(Linkage == GVA_StrongExternal);
+ // Otherwise, we have strong external linkage.
+ return llvm::Function::ExternalLinkage;
+ }
+}
+
+
+/// SetFunctionDefinitionAttributes - Set attributes for a global.
+///
+/// FIXME: This is currently only done for aliases and functions, but not for
+/// variables (these details are set in EmitGlobalVarDefinition for variables).
+void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV) {
+ SetCommonAttributes(D, GV);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F) {
+ unsigned CallingConv;
+ AttributeListType AttributeList;
+ ConstructAttributeList(Info, D, AttributeList, CallingConv);
+ F->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.size()));
+ F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+}
+
+void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
+ llvm::Function *F) {
+ if (!Features.Exceptions && !Features.ObjCNonFragileABI)
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ if (D->hasAttr<AlwaysInlineAttr>())
+ F->addFnAttr(llvm::Attribute::AlwaysInline);
+
+ if (D->hasAttr<NoInlineAttr>())
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ if (Features.getStackProtectorMode() == LangOptions::SSPOn)
+ F->addFnAttr(llvm::Attribute::StackProtect);
+ else if (Features.getStackProtectorMode() == LangOptions::SSPReq)
+ F->addFnAttr(llvm::Attribute::StackProtectReq);
+
+ if (const AlignedAttr *AA = D->getAttr<AlignedAttr>()) {
+ unsigned width = Context.Target.getCharWidth();
+ F->setAlignment(AA->getAlignment() / width);
+ while ((AA = AA->getNext<AlignedAttr>()))
+ F->setAlignment(std::max(F->getAlignment(), AA->getAlignment() / width));
+ }
+ // C++ ABI requires 2-byte alignment for member functions.
+ if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
+ F->setAlignment(2);
+}
+
+void CodeGenModule::SetCommonAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ setGlobalVisibility(GV, D);
+
+ if (D->hasAttr<UsedAttr>())
+ AddUsedGlobal(GV);
+
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+
+ getTargetCodeGenInfo().SetTargetAttributes(D, GV, *this);
+}
+
+void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+ llvm::Function *F,
+ const CGFunctionInfo &FI) {
+ SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributesForDefinition(D, F);
+
+ F->setLinkage(llvm::Function::InternalLinkage);
+
+ SetCommonAttributes(D, F);
+}
+
+void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
+ llvm::Function *F,
+ bool IsIncompleteFunction) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (!IsIncompleteFunction)
+ SetLLVMFunctionAttributes(FD, getTypes().getFunctionInfo(GD), F);
+
+ // Only a few attributes are set on declarations; these may later be
+ // overridden by a definition.
+
+ if (FD->hasAttr<DLLImportAttr>()) {
+ F->setLinkage(llvm::Function::DLLImportLinkage);
+ } else if (FD->hasAttr<WeakAttr>() ||
+ FD->hasAttr<WeakImportAttr>()) {
+ // "extern_weak" is overloaded in LLVM; we probably should have
+ // separate linkage types for this.
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ } else {
+ F->setLinkage(llvm::Function::ExternalLinkage);
+ }
+
+ if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
+ F->setSection(SA->getName());
+}
+
+void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
+ assert(!GV->isDeclaration() &&
+ "Only globals with definition can force usage.");
+ LLVMUsed.push_back(GV);
+}
+
+void CodeGenModule::EmitLLVMUsed() {
+ // Don't create llvm.used if there is no need.
+ if (LLVMUsed.empty())
+ return;
+
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // Convert LLVMUsed to what ConstantArray needs.
+ std::vector<llvm::Constant*> UsedArray;
+ UsedArray.resize(LLVMUsed.size());
+ for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
+ UsedArray[i] =
+ llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
+ i8PTy);
+ }
+
+ if (UsedArray.empty())
+ return;
+ llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, UsedArray.size());
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), ATy, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray),
+ "llvm.used");
+
+ GV->setSection("llvm.metadata");
+}
+
+void CodeGenModule::EmitDeferred() {
+ // Emit code for any potentially referenced deferred decls. Since a
+ // previously unused static decl may become used during the generation of code
+ // for a static function, iterate until no changes are made.
+
+ while (!DeferredDeclsToEmit.empty() || !DeferredVTables.empty()) {
+ if (!DeferredVTables.empty()) {
+ const CXXRecordDecl *RD = DeferredVTables.back();
+ DeferredVTables.pop_back();
+ getVTables().GenerateClassData(getVTableLinkage(RD), RD);
+ continue;
+ }
+
+ GlobalDecl D = DeferredDeclsToEmit.back();
+ DeferredDeclsToEmit.pop_back();
+
+ // Check to see if we've already emitted this. This is necessary
+ // for a couple of reasons: first, decls can end up in the
+ // deferred-decls queue multiple times, and second, decls can end
+ // up with definitions in unusual ways (e.g. by an extern inline
+ // function acquiring a strong function redefinition). Just
+ // ignore these cases.
+ //
+ // TODO: That said, looking this up multiple times is very wasteful.
+ MangleBuffer Name;
+ getMangledName(Name, D);
+ llvm::GlobalValue *CGRef = GetGlobalValue(Name);
+ assert(CGRef && "Deferred decl wasn't referenced?");
+
+ if (!CGRef->isDeclaration())
+ continue;
+
+ // GlobalAlias::isDeclaration() defers to the aliasee, but for our
+ // purposes an alias counts as a definition.
+ if (isa<llvm::GlobalAlias>(CGRef))
+ continue;
+
+ // Otherwise, emit the definition and move on to the next one.
+ EmitGlobalDefinition(D);
+ }
+}
+
+/// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+/// annotation information for a given GlobalValue. The annotation struct is
+/// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+/// GlobalValue being annotated. The second field is the constant string
+/// created from the AnnotateAttr's annotation. The third field is a constant
+/// string containing the name of the translation unit. The fourth field is
+/// the line number in the file of the annotated value declaration.
+///
+/// FIXME: this does not unique the annotation string constants, as llvm-gcc
+/// appears to.
+///
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ unsigned LineNo) {
+ llvm::Module *M = &getModule();
+
+ // Get [N x i8] constants for the annotation string and the filename string,
+ // which are the second and third elements of the global annotation structure.
+ const llvm::Type *SBP = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Constant *anno = llvm::ConstantArray::get(VMContext,
+ AA->getAnnotation(), true);
+ llvm::Constant *unit = llvm::ConstantArray::get(VMContext,
+ M->getModuleIdentifier(),
+ true);
+
+ // Get the two global values corresponding to the ConstantArrays we just
+ // created to hold the bytes of the strings.
+ llvm::GlobalValue *annoGV =
+ new llvm::GlobalVariable(*M, anno->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, anno,
+ GV->getName());
+ // translation unit name string, emitted into the llvm.metadata section.
+ llvm::GlobalValue *unitGV =
+ new llvm::GlobalVariable(*M, unit->getType(), false,
+ llvm::GlobalValue::PrivateLinkage, unit,
+ ".str");
+
+ // Create the ConstantStruct for the global annotation.
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantExpr::getBitCast(GV, SBP),
+ llvm::ConstantExpr::getBitCast(annoGV, SBP),
+ llvm::ConstantExpr::getBitCast(unitGV, SBP),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LineNo)
+ };
+ return llvm::ConstantStruct::get(VMContext, Fields, 4, false);
+}
+
+bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
+ // Never defer when EmitAllDecls is specified or the decl has
+ // attribute used.
+ if (Features.EmitAllDecls || Global->hasAttr<UsedAttr>())
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Constructors and destructors should never be deferred.
+ if (FD->hasAttr<ConstructorAttr>() ||
+ FD->hasAttr<DestructorAttr>())
+ return false;
+
+ // The key function for a class must never be deferred.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Global)) {
+ const CXXRecordDecl *RD = MD->getParent();
+ if (MD->isOutOfLine() && RD->isDynamicClass()) {
+ const CXXMethodDecl *KeyFunction = getContext().getKeyFunction(RD);
+ if (KeyFunction &&
+ KeyFunction->getCanonicalDecl() == MD->getCanonicalDecl())
+ return false;
+ }
+ }
+
+ GVALinkage Linkage = GetLinkageForFunction(getContext(), FD, Features);
+
+ // static, static inline, always_inline, and extern inline functions can
+ // always be deferred. Normal inline functions can be deferred in C99/C++.
+ // Implicit template instantiations can also be deferred in C++.
+ if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
+ Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
+ return true;
+ return false;
+ }
+
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Invalid decl");
+
+ // We never want to defer structs that have non-trivial constructors or
+ // destructors.
+
+ // FIXME: Handle references.
+ if (const RecordType *RT = VD->getType()->getAs<RecordType>()) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!RD->hasTrivialConstructor() || !RD->hasTrivialDestructor())
+ return false;
+ }
+ }
+
+ // Static data may be deferred, but out-of-line static data members
+ // cannot be.
+ Linkage L = VD->getLinkage();
+ if (L == ExternalLinkage && getContext().getLangOptions().CPlusPlus &&
+ VD->getType()->getLinkage() == UniqueExternalLinkage)
+ L = UniqueExternalLinkage;
+
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ // Initializer has side effects?
+ if (VD->getInit() && VD->getInit()->HasSideEffects(Context))
+ return false;
+ return !(VD->isStaticDataMember() && VD->isOutOfLine());
+
+ case ExternalLinkage:
+ break;
+ }
+
+ return false;
+}
+
+llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
+ const AliasAttr *AA = VD->getAttr<AliasAttr>();
+ assert(AA && "No alias?");
+
+ const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
+
+ // See if there is already something with the target's name in the module.
+ llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
+
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl());
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy), 0);
+ if (!Entry) {
+ llvm::GlobalValue* F = cast<llvm::GlobalValue>(Aliasee);
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ WeakRefReferences.insert(F);
+ }
+
+ return Aliasee;
+}
+
+void CodeGenModule::EmitGlobal(GlobalDecl GD) {
+ const ValueDecl *Global = cast<ValueDecl>(GD.getDecl());
+
+ // Weak references don't produce any output by themselves.
+ if (Global->hasAttr<WeakRefAttr>())
+ return;
+
+ // If this is an alias definition (which otherwise looks like a declaration)
+ // emit it now.
+ if (Global->hasAttr<AliasAttr>())
+ return EmitAliasDefinition(GD);
+
+ // Ignore declarations, they will be emitted on their first use.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Forward declarations are emitted lazily on first use.
+ if (!FD->isThisDeclarationADefinition())
+ return;
+ } else {
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+
+ if (VD->isThisDeclarationADefinition() != VarDecl::Definition)
+ return;
+ }
+
+ // Defer code generation when possible if this is a static definition, inline
+ // function etc. These we only want to emit if they are used.
+ if (!MayDeferGeneration(Global)) {
+ // Emit the definition if it can't be deferred.
+ EmitGlobalDefinition(GD);
+ return;
+ }
+
+ // If the value has already been used, add it directly to the
+ // DeferredDeclsToEmit list.
+ MangleBuffer MangledName;
+ getMangledName(MangledName, GD);
+ if (GetGlobalValue(MangledName))
+ DeferredDeclsToEmit.push_back(GD);
+ else {
+ // Otherwise, remember that we saw a deferred decl with this name. The
+ // first use of the mangled name will cause it to move into
+ // DeferredDeclsToEmit.
+ DeferredDecls[MangledName] = GD;
+ }
+}
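+
+// Sketch of the deferral handoff above, assuming this hypothetical input:
+//
+//   inline int f(void) { return 0; } // deferrable: recorded in DeferredDecls
+//   int g(void) { return f(); }      // first use of f's mangled name moves it
+//                                    // to DeferredDeclsToEmit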
+
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
+ const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+
+ PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
+ Context.getSourceManager(),
+ "Generating code for declaration");
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ if (Method->isVirtual())
+ getVTables().EmitThunks(GD);
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ return EmitCXXConstructor(CD, GD.getCtorType());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+ return EmitCXXDestructor(DD, GD.getDtorType());
+
+ if (isa<FunctionDecl>(D))
+ return EmitGlobalFunctionDefinition(GD);
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return EmitGlobalVarDefinition(VD);
+
+ assert(0 && "Invalid argument to EmitGlobalDefinition()");
+}
+
+/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
+/// module, create and return an llvm Function with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is
+/// used to set the attributes on the function when it is first created.
+/// to set the attributes on the function when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
+ const llvm::Type *Ty,
+ GlobalDecl D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.count(Entry)) {
+ const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl());
+ if (FD && !FD->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+
+ WeakRefReferences.erase(Entry);
+ }
+
+ if (Entry->getType()->getElementType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(Entry, PTy);
+ }
+
+ // This function doesn't have a complete type (for example, the return
+ // type is an incomplete struct). Use a fake type instead, and make
+ // sure not to try to set attributes.
+ bool IsIncompleteFunction = false;
+
+ const llvm::FunctionType *FTy;
+ if (isa<llvm::FunctionType>(Ty)) {
+ FTy = cast<llvm::FunctionType>(Ty);
+ } else {
+ FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ std::vector<const llvm::Type*>(), false);
+ IsIncompleteFunction = true;
+ }
+ llvm::Function *F = llvm::Function::Create(FTy,
+ llvm::Function::ExternalLinkage,
+ MangledName, &getModule());
+ assert(F->getName() == MangledName && "name was uniqued!");
+ if (D.getDecl())
+ SetFunctionAttributes(D, F, IsIncompleteFunction);
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ } else if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl())) {
+    // If this is the first reference to a C++ inline function in a class, queue up
+ // the deferred function body for emission. These are not seen as
+ // top-level declarations.
+ if (FD->isThisDeclarationADefinition() && MayDeferGeneration(FD))
+ DeferredDeclsToEmit.push_back(D);
+    // A constructor that is called but has no definition or explicit
+    // declaration must be synthesized.
+ else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+ if (CD->isImplicit()) {
+ assert(CD->isUsed() && "Sema doesn't consider constructor as used.");
+ DeferredDeclsToEmit.push_back(D);
+ }
+ } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
+ if (DD->isImplicit()) {
+ assert(DD->isUsed() && "Sema doesn't consider destructor as used.");
+ DeferredDeclsToEmit.push_back(D);
+ }
+ } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isCopyAssignment() && MD->isImplicit()) {
+ assert(MD->isUsed() && "Sema doesn't consider CopyAssignment as used.");
+ DeferredDeclsToEmit.push_back(D);
+ }
+ }
+ }
+
+ // Make sure the result is of the requested type.
+ if (!IsIncompleteFunction) {
+ assert(F->getType()->getElementType() == Ty);
+ return F;
+ }
+
+ const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(F, PTy);
+}
+
+/// GetAddrOfFunction - Return the address of the given function. If Ty is
+/// non-null, then this function will use the specified type if it has to
+/// create it (this occurs when we see a definition of the function).
+llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
+ const llvm::Type *Ty) {
+ // If there was no specific requested type, just convert it now.
+ if (!Ty)
+ Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
+ MangleBuffer MangledName;
+ getMangledName(MangledName, GD);
+ return GetOrCreateLLVMFunction(MangledName, Ty, GD);
+}
+
+/// CreateRuntimeFunction - Create a new runtime function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
+ llvm::StringRef Name) {
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl());
+}
+
+static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D) {
+ if (!D->getType().isConstant(Context) && !D->getType()->isReferenceType())
+ return false;
+ if (Context.getLangOptions().CPlusPlus &&
+ Context.getBaseElementType(D->getType())->getAs<RecordType>()) {
+ // FIXME: We should do something fancier here!
+ return false;
+ }
+ return true;
+}
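+
+// Illustrative cases for the predicate above (hypothetical declarations):
+//
+//   const int N = 42;  // constant global
+//   const SomeClass C; // C++ record type: conservatively treated non-constant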
+
+/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
+/// create and return an llvm GlobalVariable with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is
+/// used to set the attributes on the global when it is first created.
+/// to set the attributes on the global when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
+ const llvm::PointerType *Ty,
+ const VarDecl *D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.count(Entry)) {
+ if (D && !D->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+
+ WeakRefReferences.erase(Entry);
+ }
+
+ if (Entry->getType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ return llvm::ConstantExpr::getBitCast(Entry, Ty);
+ }
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, MangledName, 0,
+ false, Ty->getAddressSpace());
+
+ // Handle things which are present even on external declarations.
+ if (D) {
+ // FIXME: This code is overly simple and should be merged with other global
+ // handling.
+ GV->setConstant(DeclIsConstantGlobal(Context, D));
+
+ // FIXME: Merge with other attribute handling code.
+ if (D->getStorageClass() == VarDecl::PrivateExtern)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ if (D->hasAttr<WeakAttr>() ||
+ D->hasAttr<WeakImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ GV->setThreadLocal(D->isThreadSpecified());
+ }
+
+ return GV;
+}
+
+
+/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+/// given global variable. If Ty is non-null and if the global doesn't exist,
+/// then it will be created with the specified type instead of whatever the
+/// normal requested type would be.
+llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
+ const llvm::Type *Ty) {
+ assert(D->hasGlobalStorage() && "Not a global variable");
+ QualType ASTTy = D->getType();
+ if (Ty == 0)
+ Ty = getTypes().ConvertTypeForMem(ASTTy);
+
+ const llvm::PointerType *PTy =
+ llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
+
+ MangleBuffer MangledName;
+ getMangledName(MangledName, D);
+ return GetOrCreateLLVMGlobal(MangledName, PTy, D);
+}
+
+/// CreateRuntimeVariable - Create a new runtime global variable with the
+/// specified type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
+ llvm::StringRef Name) {
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0);
+}
+
+void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
+ assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+ if (MayDeferGeneration(D)) {
+ // If we have not seen a reference to this variable yet, place it
+ // into the deferred declarations table to be emitted if needed
+ // later.
+ MangleBuffer MangledName;
+ getMangledName(MangledName, D);
+ if (!GetGlobalValue(MangledName)) {
+ DeferredDecls[MangledName] = D;
+ return;
+ }
+ }
+
+ // The tentative definition is the only definition.
+ EmitGlobalVarDefinition(D);
+}
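+
+// The C pattern handled here, for illustration:
+//
+//   int x;   // tentative definition
+//   int x;   // still tentative; if no initialized definition follows, one
+//            // zero-initialized global is emitted at end of TU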
+
+void CodeGenModule::EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired) {
+ if (DefinitionRequired)
+ getVTables().GenerateClassData(getVTableLinkage(Class), Class);
+}
+
+llvm::GlobalVariable::LinkageTypes
+CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
+ if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
+ return llvm::GlobalVariable::InternalLinkage;
+
+ if (const CXXMethodDecl *KeyFunction
+ = RD->getASTContext().getKeyFunction(RD)) {
+ // If this class has a key function, use that to determine the linkage of
+ // the vtable.
+ const FunctionDecl *Def = 0;
+ if (KeyFunction->getBody(Def))
+ KeyFunction = cast<CXXMethodDecl>(Def);
+
+ switch (KeyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ if (KeyFunction->isInlined())
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ return llvm::GlobalVariable::ExternalLinkage;
+
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ return llvm::GlobalVariable::WeakODRLinkage;
+ }
+ }
+
+ switch (RD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ return llvm::GlobalVariable::WeakODRLinkage;
+ }
+
+ // Silence GCC warning.
+ return llvm::GlobalVariable::WeakODRLinkage;
+}
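+
+// A sketch of the key-function rule applied above (hypothetical class): the
+// translation unit that defines the first non-inline, non-pure virtual member
+// emits the vtable with strong external linkage.
+//
+//   struct S { virtual void f(); }; // key function: S::f
+//   void S::f() {}                  // this TU owns S's vtable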
+
+static CodeGenModule::GVALinkage
+GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
+ // If this is a static data member, compute the kind of template
+ // specialization. Otherwise, this variable is not part of a
+ // template.
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+ if (VD->isStaticDataMember())
+ TSK = VD->getTemplateSpecializationKind();
+
+ Linkage L = VD->getLinkage();
+ if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
+ VD->getType()->getLinkage() == UniqueExternalLinkage)
+ L = UniqueExternalLinkage;
+
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return CodeGenModule::GVA_Internal;
+
+ case ExternalLinkage:
+ switch (TSK) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return CodeGenModule::GVA_StrongExternal;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ llvm_unreachable("Variable should not be instantiated");
+ // Fall through to treat this like any other instantiation.
+
+ case TSK_ExplicitInstantiationDefinition:
+ return CodeGenModule::GVA_ExplicitTemplateInstantiation;
+
+ case TSK_ImplicitInstantiation:
+ return CodeGenModule::GVA_TemplateInstantiation;
+ }
+ }
+
+ return CodeGenModule::GVA_StrongExternal;
+}
+
+CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
+ return CharUnits::fromQuantity(
+ TheTargetData.getTypeStoreSizeInBits(Ty) / Context.getCharWidth());
+}
+
+void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
+ llvm::Constant *Init = 0;
+ QualType ASTTy = D->getType();
+ bool NonConstInit = false;
+
+ const Expr *InitExpr = D->getAnyInitializer();
+
+ if (!InitExpr) {
+ // This is a tentative definition; tentative definitions are
+ // implicitly initialized with { 0 }.
+ //
+ // Note that tentative definitions are only emitted at the end of
+ // a translation unit, so they should never have incomplete
+ // type. In addition, EmitTentativeDefinition makes sure that we
+ // never attempt to emit a tentative definition if a real one
+// exists. A use may still exist, however, so we still may need
+ // to do a RAUW.
+ assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
+ Init = EmitNullConstant(D->getType());
+ } else {
+ Init = EmitConstantExpr(InitExpr, D->getType());
+ if (!Init) {
+ QualType T = InitExpr->getType();
+ if (D->getType()->isReferenceType())
+ T = D->getType();
+
+ if (getLangOptions().CPlusPlus) {
+ EmitCXXGlobalVarDeclInitFunc(D);
+ Init = EmitNullConstant(T);
+ NonConstInit = true;
+ } else {
+ ErrorUnsupported(D, "static initializer");
+ Init = llvm::UndefValue::get(getTypes().ConvertType(T));
+ }
+ }
+ }
+
+ const llvm::Type* InitType = Init->getType();
+ llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast ||
+ // all zero index gep.
+ CE->getOpcode() == llvm::Instruction::GetElementPtr);
+ Entry = CE->getOperand(0);
+ }
+
+ // Entry is now either a Function or GlobalVariable.
+ llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Entry);
+
+ // We have a definition after a declaration with the wrong type.
+ // We must make a new GlobalVariable* and update everything that used OldGV
+ // (a declaration or tentative definition) with the new GlobalVariable*
+ // (which will be a definition).
+ //
+ // This happens if there is a prototype for a global (e.g.
+ // "extern int x[];") and then a definition of a different type (e.g.
+ // "int x[10];"). This also happens when an initializer has a different type
+ // from the type of the global (this happens with unions).
+ if (GV == 0 ||
+ GV->getType()->getElementType() != InitType ||
+ GV->getType()->getAddressSpace() != ASTTy.getAddressSpace()) {
+
+ // Move the old entry aside so that we'll create a new one.
+ Entry->setName(llvm::StringRef());
+
+    // Make a new global with the correct type; this is now guaranteed to work.
+ GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ cast<llvm::GlobalValue>(Entry)->eraseFromParent();
+ }
+
+ if (const AnnotateAttr *AA = D->getAttr<AnnotateAttr>()) {
+ SourceManager &SM = Context.getSourceManager();
+ AddAnnotation(EmitAnnotateAttr(GV, AA,
+ SM.getInstantiationLineNumber(D->getLocation())));
+ }
+
+ GV->setInitializer(Init);
+
+ // If it is safe to mark the global 'constant', do so now.
+ GV->setConstant(false);
+ if (!NonConstInit && DeclIsConstantGlobal(Context, D))
+ GV->setConstant(true);
+
+ GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
+
+ // Set the llvm linkage type as appropriate.
+ GVALinkage Linkage = GetLinkageForVariable(getContext(), D);
+ if (Linkage == GVA_Internal)
+ GV->setLinkage(llvm::Function::InternalLinkage);
+ else if (D->hasAttr<DLLImportAttr>())
+ GV->setLinkage(llvm::Function::DLLImportLinkage);
+ else if (D->hasAttr<DLLExportAttr>())
+ GV->setLinkage(llvm::Function::DLLExportLinkage);
+ else if (D->hasAttr<WeakAttr>()) {
+ if (GV->isConstant())
+ GV->setLinkage(llvm::GlobalVariable::WeakODRLinkage);
+ else
+ GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ } else if (Linkage == GVA_TemplateInstantiation ||
+ Linkage == GVA_ExplicitTemplateInstantiation)
+ // FIXME: It seems like we can provide more specific linkage here
+ // (LinkOnceODR, WeakODR).
+ GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ else if (!getLangOptions().CPlusPlus && !CodeGenOpts.NoCommon &&
+ !D->hasExternalStorage() && !D->getInit() &&
+ !D->getAttr<SectionAttr>()) {
+ GV->setLinkage(llvm::GlobalVariable::CommonLinkage);
+ // common vars aren't constant even if declared const.
+ GV->setConstant(false);
+ } else
+ GV->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+
+ SetCommonAttributes(D, GV);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D->getLocation());
+ DI->EmitGlobalVariable(GV, D);
+ }
+}
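+
+// A few illustrative linkage outcomes of the logic above (hypothetical C):
+//
+//   static int a; // GVA_Internal -> internal linkage
+//   int b;        // no initializer, no section, C -> common linkage
+//   int c = 1;    // strong definition -> external linkage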
+
+/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
+/// implement a function with no prototype, e.g. "int foo() {}". If there are
+/// existing call uses of the old function in the module, this adjusts them to
+/// call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn) {
+ // If we're redefining a global as a function, don't transform it.
+ llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
+ if (OldFn == 0) return;
+
+ const llvm::Type *NewRetTy = NewFn->getReturnType();
+ llvm::SmallVector<llvm::Value*, 4> ArgList;
+
+ for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
+ UI != E; ) {
+ // TODO: Do invokes ever occur in C code? If so, we should handle them too.
+ llvm::Value::use_iterator I = UI++; // Increment before the CI is erased.
+ llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*I);
+ llvm::CallSite CS(CI);
+ if (!CI || !CS.isCallee(I)) continue;
+
+ // If the return types don't match exactly, and if the call isn't dead, then
+ // we can't transform this call.
+ if (CI->getType() != NewRetTy && !CI->use_empty())
+ continue;
+
+ // If the function was passed too few arguments, don't transform. If extra
+ // arguments were passed, we silently drop them. If any of the types
+ // mismatch, we don't transform.
+ unsigned ArgNo = 0;
+ bool DontTransform = false;
+ for (llvm::Function::arg_iterator AI = NewFn->arg_begin(),
+ E = NewFn->arg_end(); AI != E; ++AI, ++ArgNo) {
+ if (CS.arg_size() == ArgNo ||
+ CS.getArgument(ArgNo)->getType() != AI->getType()) {
+ DontTransform = true;
+ break;
+ }
+ }
+ if (DontTransform)
+ continue;
+
+ // Okay, we can transform this. Create the new call instruction and copy
+ // over the required information.
+ ArgList.append(CS.arg_begin(), CS.arg_begin() + ArgNo);
+ llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(),
+ ArgList.end(), "", CI);
+ ArgList.clear();
+ if (!NewCall->getType()->isVoidTy())
+ NewCall->takeName(CI);
+ NewCall->setAttributes(CI->getAttributes());
+ NewCall->setCallingConv(CI->getCallingConv());
+
+ // Finally, remove the old call, replacing any uses with the new one.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewCall);
+
+ // Copy debug location attached to CI.
+ if (!CI->getDebugLoc().isUnknown())
+ NewCall->setDebugLoc(CI->getDebugLoc());
+ CI->eraseFromParent();
+ }
+}
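+
+// The unprototyped pattern rewritten above, for illustration:
+//
+//   int foo();                      // no prototype: uses go through a bitcast
+//   int bar(void) { return foo(); }
+//   int foo() { return 0; }         // definition: compatible calls are
+//                                   // rewritten to call it directly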
+
+
+void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
+ const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
+ const llvm::FunctionType *Ty = getTypes().GetFunctionType(GD);
+ getMangleContext().mangleInitDiscriminator();
+ // Get or create the prototype for the function.
+ llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != Ty) {
+ llvm::GlobalValue *OldFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+    // F is the Function* for the one with the wrong type; we must make a new
+ // Function* and update everything that used F (a declaration) with the new
+ // Function* (which will be a definition).
+ //
+ // This happens if there is a prototype for a function
+ // (e.g. "int f()") and then a definition of a different type
+ // (e.g. "int f(int x)"). Move the old function aside so that it
+ // doesn't interfere with GetAddrOfFunction.
+ OldFn->setName(llvm::StringRef());
+ llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
+
+ // If this is an implementation of a function without a prototype, try to
+ // replace any existing uses of the function (which may be calls) with uses
+    // of the new function.
+ if (D->getType()->isFunctionNoProtoType()) {
+ ReplaceUsesOfNonProtoTypeWithRealFunction(OldFn, NewFn);
+ OldFn->removeDeadConstantUsers();
+ }
+
+ // Replace uses of F with the Function we will endow with a body.
+ if (!Entry->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Ok, delete the old function now, which is dead.
+ OldFn->eraseFromParent();
+
+ Entry = NewFn;
+ }
+
+ llvm::Function *Fn = cast<llvm::Function>(Entry);
+ setFunctionLinkage(D, Fn);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+
+ if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
+ AddGlobalCtor(Fn, CA->getPriority());
+ if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
+ AddGlobalDtor(Fn, DA->getPriority());
+}
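+
+// The attribute handling at the end corresponds to source such as (for
+// example):
+//
+//   __attribute__((constructor(101))) static void init(void) {} // global ctor
+//   __attribute__((destructor)) static void fini(void) {}       // global dtor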
+
+void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
+ const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+ const AliasAttr *AA = D->getAttr<AliasAttr>();
+ assert(AA && "Not an alias?");
+
+ MangleBuffer MangledName;
+ getMangledName(MangledName, GD);
+
+ // If there is a definition in the module, then it wins over the alias.
+  // This is dubious, but we allow it to be safe. Just ignore the alias.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return;
+
+ const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+
+ // Create a reference to the named value. This ensures that it is emitted
+  // if it is a deferred decl.
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl());
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy), 0);
+
+ // Create the new alias itself, but don't set a name yet.
+ llvm::GlobalValue *GA =
+ new llvm::GlobalAlias(Aliasee->getType(),
+ llvm::Function::ExternalLinkage,
+ "", Aliasee, &getModule());
+
+ if (Entry) {
+ assert(Entry->isDeclaration());
+
+ // If there is a declaration in the module, then we had an extern followed
+ // by the alias, as in:
+ // extern int test6();
+ // ...
+ // int test6() __attribute__((alias("test7")));
+ //
+ // Remove it and replace uses of it with the alias.
+ GA->takeName(Entry);
+
+ Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
+ Entry->getType()));
+ Entry->eraseFromParent();
+ } else {
+ GA->setName(MangledName.getString());
+ }
+
+ // Set attributes which are particular to an alias; this is a
+ // specialization of the attributes which may be set on a global
+ // variable/function.
+ if (D->hasAttr<DLLExportAttr>()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // The dllexport attribute is ignored for undefined symbols.
+ if (FD->getBody())
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ } else {
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ }
+ } else if (D->hasAttr<WeakAttr>() ||
+ D->hasAttr<WeakRefAttr>() ||
+ D->hasAttr<WeakImportAttr>()) {
+ GA->setLinkage(llvm::Function::WeakAnyLinkage);
+ }
+
+ SetCommonAttributes(D, GA);
+}
+
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
+ assert((Context.BuiltinInfo.isLibFunction(BuiltinID) ||
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) &&
+ "isn't a lib fn");
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ const char *Name = Context.BuiltinInfo.GetName(BuiltinID);
+ if (Context.BuiltinInfo.isLibFunction(BuiltinID))
+ Name += 10;
+
+ const llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+
+ return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl(FD));
+}
+
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
+ unsigned NumTys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(),
+ (llvm::Intrinsic::ID)IID, Tys, NumTys);
+}
+
+
+llvm::Function *CodeGenModule::getMemCpyFn(const llvm::Type *DestType,
+ const llvm::Type *SrcType,
+ const llvm::Type *SizeType) {
+ const llvm::Type *ArgTypes[3] = {DestType, SrcType, SizeType };
+ return getIntrinsic(llvm::Intrinsic::memcpy, ArgTypes, 3);
+}
+
+llvm::Function *CodeGenModule::getMemMoveFn(const llvm::Type *DestType,
+ const llvm::Type *SrcType,
+ const llvm::Type *SizeType) {
+ const llvm::Type *ArgTypes[3] = {DestType, SrcType, SizeType };
+ return getIntrinsic(llvm::Intrinsic::memmove, ArgTypes, 3);
+}
+
+llvm::Function *CodeGenModule::getMemSetFn(const llvm::Type *DestType,
+ const llvm::Type *SizeType) {
+ const llvm::Type *ArgTypes[2] = { DestType, SizeType };
+ return getIntrinsic(llvm::Intrinsic::memset, ArgTypes, 2);
+}
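+
+// A sketch of how a caller might request one of these intrinsics; the given
+// types select the intrinsic overload (names here are illustrative only):
+//
+//   const llvm::Type *I8P = llvm::Type::getInt8PtrTy(VMContext);
+//   const llvm::Type *I64 = llvm::Type::getInt64Ty(VMContext);
+//   llvm::Function *MemCpy = getMemCpyFn(I8P, I8P, I64);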
+
+static llvm::StringMapEntry<llvm::Constant*> &
+GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
+ const StringLiteral *Literal,
+ bool TargetIsLSB,
+ bool &IsUTF16,
+ unsigned &StringLength) {
+ unsigned NumBytes = Literal->getByteLength();
+
+ // Check for simple case.
+ if (!Literal->containsNonAsciiOrNull()) {
+ StringLength = NumBytes;
+ return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
+ StringLength));
+ }
+
+ // Otherwise, convert the UTF8 literals into a byte string.
+ llvm::SmallVector<UTF16, 128> ToBuf(NumBytes);
+ const UTF8 *FromPtr = (UTF8 *)Literal->getStrData();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
+
+ // Check for conversion failure.
+ if (Result != conversionOK) {
+ // FIXME: Have Sema::CheckObjCString() validate the UTF-8 string and remove
+ // this duplicate code.
+ assert(Result == sourceIllegal && "UTF-8 to UTF-16 conversion failed");
+ StringLength = NumBytes;
+ return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
+ StringLength));
+ }
+
+ // ConvertUTF8toUTF16 returns the length in ToPtr.
+ StringLength = ToPtr - &ToBuf[0];
+
+ // Render the UTF-16 string into a byte array and convert to the target byte
+ // order.
+ //
+ // FIXME: This isn't something we should need to do here.
+ llvm::SmallString<128> AsBytes;
+ AsBytes.reserve(StringLength * 2);
+ for (unsigned i = 0; i != StringLength; ++i) {
+ unsigned short Val = ToBuf[i];
+ if (TargetIsLSB) {
+ AsBytes.push_back(Val & 0xFF);
+ AsBytes.push_back(Val >> 8);
+ } else {
+ AsBytes.push_back(Val >> 8);
+ AsBytes.push_back(Val & 0xFF);
+ }
+ }
+  // Append one extra null character; the second is automatically added by our
+  // caller.
+ AsBytes.push_back(0);
+
+ IsUTF16 = true;
+ return Map.GetOrCreateValue(llvm::StringRef(AsBytes.data(), AsBytes.size()));
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ bool isUTF16 = false;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ getTargetData().isLittleEndian(),
+ isUTF16, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero =
+ llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get __CFConstantStringClassReference.
+ if (!CFConstantStringClassRef) {
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ "__CFConstantStringClassReference");
+ // Decay array -> ptr
+ CFConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ }
+
+ QualType CFTy = getContext().getCFConstantStringType();
+
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(CFTy));
+
+ std::vector<llvm::Constant*> Fields(4);
+
+ // Class pointer.
+ Fields[0] = CFConstantStringClassRef;
+
+ // Flags.
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
+ llvm::ConstantInt::get(Ty, 0x07C8);
+
+ // String pointer.
+ llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ bool isConstant;
+ if (isUTF16) {
+ // FIXME: why do utf strings get "_" labels instead of "L" labels?
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ // Note: -fwritable-strings doesn't make unicode CFStrings writable, but
+ // does make plain ascii ones writable.
+ isConstant = true;
+ } else {
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !Features.WritableStrings;
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
+ ".str");
+ if (isUTF16) {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
+ GV->setAlignment(Align.getQuantity());
+ }
+ Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+
+ // String length.
+ Ty = getTypes().ConvertType(getContext().LongTy);
+ Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_cfstring_");
+ if (const char *Sect = getContext().Target.getCFStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
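+
+// For illustration: an Objective-C literal such as @"hello" (or CFSTR("hello")
+// in C) is lowered by the code above to a private "_unnamed_cfstring_" struct
+// of the form { isa, flags (0x07C8 ASCII / 0x07D0 UTF-16), bytes, length }.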
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ bool isUTF16 = false;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ getTargetData().isLittleEndian(),
+ isUTF16, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero =
+ llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get _NSConstantStringClassReference.
+ if (!NSConstantStringClassRef) {
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ Features.ObjCNonFragileABI ?
+ "OBJC_CLASS_$_NSConstantString" :
+ "_NSConstantStringClassReference");
+ // Decay array -> ptr
+ NSConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ }
+
+ QualType NSTy = getContext().getNSConstantStringType();
+
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(NSTy));
+
+ std::vector<llvm::Constant*> Fields(3);
+
+ // Class pointer.
+ Fields[0] = NSConstantStringClassRef;
+
+ // String pointer.
+ llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ bool isConstant;
+ if (isUTF16) {
+ // FIXME: why do utf strings get "_" labels instead of "L" labels?
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ // Note: -fwritable-strings doesn't make unicode NSStrings writable, but
+ // does make plain ascii ones writable.
+ isConstant = true;
+ } else {
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !Features.WritableStrings;
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
+ ".str");
+ if (isUTF16) {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
+ GV->setAlignment(Align.getQuantity());
+ }
+ Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+
+ // String length.
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_nsstring_");
+ // FIXME. Fix section.
+ if (const char *Sect =
+ Features.ObjCNonFragileABI
+ ? getContext().Target.getNSStringNonFragileABISection()
+ : getContext().Target.getNSStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
+
+/// GetStringForStringLiteral - Return the appropriate bytes for a
+/// string literal, properly padded to match the literal type.
+std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
+ const char *StrData = E->getStrData();
+ unsigned Len = E->getByteLength();
+
+ const ConstantArrayType *CAT =
+ getContext().getAsConstantArrayType(E->getType());
+ assert(CAT && "String isn't pointer or array!");
+
+ // Resize the string to the right size.
+ std::string Str(StrData, StrData+Len);
+ uint64_t RealLen = CAT->getSize().getZExtValue();
+
+ if (E->isWide())
+ RealLen *= getContext().Target.getWCharWidth()/8;
+
+ Str.resize(RealLen, '\0');
+
+ return Str;
+}
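+
+// Padding example for the helper above: for the literal "hi", getByteLength()
+// is 2 but its type is char[3], so the result is resized to 3 bytes, appending
+// the '\0'; wide literals scale by the target wchar_t width.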
+
+/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
+/// constant array for the given string literal.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
+ // FIXME: This can be more efficient.
+ // FIXME: We shouldn't need to bitcast the constant in the wide string case.
+ llvm::Constant *C = GetAddrOfConstantString(GetStringForStringLiteral(S));
+ if (S->isWide()) {
+ llvm::Type *DestTy =
+ llvm::PointerType::getUnqual(getTypes().ConvertType(S->getType()));
+ C = llvm::ConstantExpr::getBitCast(C, DestTy);
+ }
+ return C;
+}
+
+/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+/// array for the given ObjCEncodeExpr node.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
+ std::string Str;
+ getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+
+ return GetAddrOfConstantCString(Str);
+}
+
+
+/// GenerateStringLiteral - Create storage for a string literal.
+static llvm::Constant *GenerateStringLiteral(const std::string &str,
+ bool constant,
+ CodeGenModule &CGM,
+ const char *GlobalName) {
+ // Create Constant for this string literal. Don't add a '\0'.
+ llvm::Constant *C =
+ llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
+
+ // Create a global variable for this string
+ return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
+ llvm::GlobalValue::PrivateLinkage,
+ C, GlobalName);
+}
+
+/// GetAddrOfConstantString - Returns a pointer to a character array
+/// containing the literal. Its contents are exactly those of the
+/// given string, i.e. it will not be null terminated automatically;
+/// see GetAddrOfConstantCString. Note that whether the result is
+/// actually a pointer to an LLVM constant depends on
+/// Features.WritableStrings.
+///
+/// The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(const std::string &str,
+ const char *GlobalName) {
+ bool IsConstant = !Features.WritableStrings;
+
+ // Get the default prefix if a name wasn't specified.
+ if (!GlobalName)
+ GlobalName = ".str";
+
+ // Don't share any string literals if strings aren't constant.
+ if (!IsConstant)
+ return GenerateStringLiteral(str, false, *this, GlobalName);
+
+ llvm::StringMapEntry<llvm::Constant *> &Entry =
+ ConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
+
+ if (Entry.getValue())
+ return Entry.getValue();
+
+ // Create a global variable for this.
+ llvm::Constant *C = GenerateStringLiteral(str, true, *this, GlobalName);
+ Entry.setValue(C);
+ return C;
+}
+
+/// GetAddrOfConstantCString - Returns a pointer to a character
+/// array containing the literal and a terminating '\0'
+/// character. The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName){
+ return GetAddrOfConstantString(str + '\0', GlobalName);
+}
+
+/// EmitObjCPropertyImplementations - Emit information for synthesized
+/// properties for an implementation.
+void CodeGenModule::EmitObjCPropertyImplementations(const
+ ObjCImplementationDecl *D) {
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ // Dynamic is just for type-checking.
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ // Determine which methods need to be implemented, some may have
+ // been overridden. Note that ::isSynthesized is not the method
+      // we want; that just indicates if the decl came from a
+ // property. What we want to know is if the method is defined in
+ // this implementation.
+ if (!D->getInstanceMethod(PD->getGetterName()))
+ CodeGenFunction(*this).GenerateObjCGetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ if (!PD->isReadOnly() &&
+ !D->getInstanceMethod(PD->getSetterName()))
+ CodeGenFunction(*this).GenerateObjCSetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ }
+ }
+}
+
+/// EmitObjCIvarInitializations - Emit information for ivar initialization
+/// for an implementation.
+void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
+ if (!Features.NeXTRuntime || D->getNumIvarInitializers() == 0)
+ return;
+ DeclContext* DC = const_cast<DeclContext*>(dyn_cast<DeclContext>(D));
+ assert(DC && "EmitObjCIvarInitializations - null DeclContext");
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(), cxxSelector,
+ getContext().VoidTy, 0,
+ DC, true, false, true,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(DTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+
+ II = &getContext().Idents.get(".cxx_construct");
+ cxxSelector = getContext().Selectors.getSelector(0, &II);
+ // The constructor returns 'self'.
+ ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(), cxxSelector,
+ getContext().getObjCIdType(), 0,
+ DC, true, false, true,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(CTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+
+}
+
+/// EmitNamespace - Emit all declarations in a namespace.
+void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
+ for (RecordDecl::decl_iterator I = ND->decls_begin(), E = ND->decls_end();
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+// EmitLinkageSpec - Emit all declarations in a linkage spec.
+void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
+ if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
+ LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
+ ErrorUnsupported(LSD, "linkage spec");
+ return;
+ }
+
+ for (RecordDecl::decl_iterator I = LSD->decls_begin(), E = LSD->decls_end();
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+/// EmitTopLevelDecl - Emit code for a single top level declaration.
+void CodeGenModule::EmitTopLevelDecl(Decl *D) {
+ // If an error has occurred, stop code generation, but continue
+ // parsing and semantic analysis (to ensure all warnings and errors
+ // are emitted).
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Ignore dependent declarations.
+ if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
+ return;
+
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
+ return;
+
+ EmitGlobal(cast<FunctionDecl>(D));
+ break;
+
+ case Decl::Var:
+ EmitGlobal(cast<VarDecl>(D));
+ break;
+
+ // C++ Decls
+ case Decl::Namespace:
+ EmitNamespace(cast<NamespaceDecl>(D));
+ break;
+ // No code generation needed.
+ case Decl::UsingShadow:
+ case Decl::Using:
+ case Decl::UsingDirective:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::NamespaceAlias:
+ break;
+ case Decl::CXXConstructor:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
+ return;
+
+ EmitCXXConstructors(cast<CXXConstructorDecl>(D));
+ break;
+ case Decl::CXXDestructor:
+ EmitCXXDestructors(cast<CXXDestructorDecl>(D));
+ break;
+
+ case Decl::StaticAssert:
+ // Nothing to do.
+ break;
+
+ // Objective-C Decls
+
+ // Forward declarations, no (immediate) code generation.
+ case Decl::ObjCClass:
+ case Decl::ObjCForwardProtocol:
+ case Decl::ObjCCategory:
+ case Decl::ObjCInterface:
+ break;
+
+ case Decl::ObjCProtocol:
+ Runtime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
+ break;
+
+ case Decl::ObjCCategoryImpl:
+    // Categories have properties but don't support @synthesize, so we
+ // can ignore them here.
+ Runtime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+ break;
+
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
+ EmitObjCPropertyImplementations(OMD);
+ EmitObjCIvarInitializations(OMD);
+ Runtime->GenerateClass(OMD);
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(D);
+ // If this is not a prototype, emit the body.
+ if (OMD->getBody())
+ CodeGenFunction(*this).GenerateObjCMethod(OMD);
+ break;
+ }
+ case Decl::ObjCCompatibleAlias:
+ // compatibility-alias is a directive and has no code gen.
+ break;
+
+ case Decl::LinkageSpec:
+ EmitLinkageSpec(cast<LinkageSpecDecl>(D));
+ break;
+
+ case Decl::FileScopeAsm: {
+ FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
+ llvm::StringRef AsmString = AD->getAsmString()->getString();
+
+ const std::string &S = getModule().getModuleInlineAsm();
+ if (S.empty())
+ getModule().setModuleInlineAsm(AsmString);
+ else
+ getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
+ break;
+ }
+
+ default:
+ // Make sure we handled everything we should, every other kind is a
+ // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
+ // function. Need to recode Decl::Kind to do that easily.
+ assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
new file mode 100644
index 0000000..319744c4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -0,0 +1,577 @@
+//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENMODULE_H
+#define CLANG_CODEGEN_CODEGENMODULE_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "CGBlocks.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CGVTables.h"
+#include "CGCXXABI.h"
+#include "CodeGenTypes.h"
+#include "GlobalDecl.h"
+#include "Mangle.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/ValueHandle.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class LLVMContext;
+}
+
+namespace clang {
+ class TargetCodeGenInfo;
+ class ASTContext;
+ class FunctionDecl;
+ class IdentifierInfo;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCProtocolDecl;
+ class ObjCEncodeExpr;
+ class BlockExpr;
+ class CharUnits;
+ class Decl;
+ class Expr;
+ class Stmt;
+ class StringLiteral;
+ class NamedDecl;
+ class ValueDecl;
+ class VarDecl;
+ class LangOptions;
+ class CodeGenOptions;
+ class Diagnostic;
+ class AnnotateAttr;
+ class CXXDestructorDecl;
+
+namespace CodeGen {
+
+ class CodeGenFunction;
+ class CGDebugInfo;
+ class CGObjCRuntime;
+ class MangleBuffer;
+
+/// CodeGenModule - This class organizes the cross-function state that is used
+/// while generating LLVM code.
+class CodeGenModule : public BlockModule {
+ CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+
+ typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
+
+ ASTContext &Context;
+ const LangOptions &Features;
+ const CodeGenOptions &CodeGenOpts;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
+ Diagnostic &Diags;
+ CodeGenTypes Types;
+
+ /// VTables - Holds information about C++ vtables.
+ CodeGenVTables VTables;
+ friend class CodeGenVTables;
+
+ CGObjCRuntime* Runtime;
+ CXXABI* ABI;
+ CGDebugInfo* DebugInfo;
+
+  /// WeakRefReferences - A set of references that have only been seen via
+  /// a weakref so far. This is used to drop the weak linkage of a reference
+  /// if we ever see a direct reference or a definition.
+ llvm::SmallPtrSet<llvm::GlobalValue*, 10> WeakRefReferences;
+
+ /// DeferredDecls - This contains all the decls which have definitions but
+ /// which are deferred for emission and therefore should only be output if
+ /// they are actually used. If a decl is in this, then it is known to have
+ /// not been referenced yet.
+ llvm::StringMap<GlobalDecl> DeferredDecls;
+
+ /// DeferredDeclsToEmit - This is a list of deferred decls which we have seen
+ /// that *are* actually referenced. These get code generated when the module
+ /// is done.
+ std::vector<GlobalDecl> DeferredDeclsToEmit;
+
+ /// LLVMUsed - List of global values which are required to be
+ /// present in the object file; bitcast to i8*. This is used for
+ /// forcing visibility of symbols which may otherwise be optimized
+ /// out.
+ std::vector<llvm::WeakVH> LLVMUsed;
+
+ /// GlobalCtors - Store the list of global constructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalCtors;
+
+ /// GlobalDtors - Store the list of global destructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalDtors;
+
+ std::vector<llvm::Constant*> Annotations;
+
+ llvm::StringMap<llvm::Constant*> CFConstantStringMap;
+ llvm::StringMap<llvm::Constant*> ConstantStringMap;
+ llvm::DenseMap<const Decl*, llvm::Value*> StaticLocalDeclMap;
+
+ /// CXXGlobalInits - Global variables with initializers that need to run
+ /// before main.
+ std::vector<llvm::Constant*> CXXGlobalInits;
+
+ /// CXXGlobalDtors - Global destructor functions and arguments that need to
+ /// run on termination.
+ std::vector<std::pair<llvm::Constant*,llvm::Constant*> > CXXGlobalDtors;
+
+ /// CFConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *CFConstantStringClassRef;
+
+ /// NSConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *NSConstantStringClassRef;
+
+ /// Lazily create the Objective-C runtime
+ void createObjCRuntime();
+ /// Lazily create the C++ ABI
+ void createCXXABI();
+
+ llvm::LLVMContext &VMContext;
+public:
+ CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
+ llvm::Module &M, const llvm::TargetData &TD, Diagnostic &Diags);
+
+ ~CodeGenModule();
+
+ /// Release - Finalize LLVM code generation.
+ void Release();
+
+ /// getObjCRuntime() - Return a reference to the configured
+ /// Objective-C runtime.
+ CGObjCRuntime &getObjCRuntime() {
+ if (!Runtime) createObjCRuntime();
+ return *Runtime;
+ }
+
+ /// hasObjCRuntime() - Return true iff an Objective-C runtime has
+ /// been configured.
+ bool hasObjCRuntime() { return !!Runtime; }
+
+ /// getCXXABI() - Return a reference to the configured
+ /// C++ ABI.
+ CXXABI &getCXXABI() {
+ if (!ABI) createCXXABI();
+ return *ABI;
+ }
+
+ /// hasCXXABI() - Return true iff a C++ ABI has been configured.
+ bool hasCXXABI() { return !!ABI; }
+
+ llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
+ return StaticLocalDeclMap[VD];
+ }
+ void setStaticLocalDeclAddress(const VarDecl *D,
+ llvm::GlobalVariable *GV) {
+ StaticLocalDeclMap[D] = GV;
+ }
+
+ CGDebugInfo *getDebugInfo() { return DebugInfo; }
+ ASTContext &getContext() const { return Context; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+ const LangOptions &getLangOptions() const { return Features; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ MangleContext &getMangleContext() {
+ if (!ABI) createCXXABI();
+ return ABI->getMangleContext();
+ }
+ CodeGenVTables &getVTables() { return VTables; }
+ Diagnostic &getDiags() const { return Diags; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ llvm::LLVMContext &getLLVMContext() { return VMContext; }
+ const TargetCodeGenInfo &getTargetCodeGenInfo() const;
+ bool isTargetDarwin() const;
+
+ /// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
+ LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;
+
+ /// setGlobalVisibility - Set the visibility for the given LLVM
+ /// GlobalValue.
+ void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+
+ llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
+ if (isa<CXXConstructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXConstructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ GD.getCtorType());
+ else if (isa<CXXDestructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXDestructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ GD.getDtorType());
+ else if (isa<FunctionDecl>(GD.getDecl()))
+ return GetAddrOfFunction(GD);
+ else
+ return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
+ }
+
+ /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+ /// given global variable. If Ty is non-null and if the global doesn't exist,
+  /// then it will be created with the specified type instead of whatever the
+ /// normal requested type would be.
+ llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
+ const llvm::Type *Ty = 0);
+
+ /// GetAddrOfFunction - Return the address of the given function. If Ty is
+ /// non-null, then this function will use the specified type if it has to
+ /// create it.
+ llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
+ const llvm::Type *Ty = 0);
+
+ /// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
+ /// for the given type.
+ llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
+
+ /// GetAddrOfThunk - Get the address of the thunk for the given global decl.
+ llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// GetWeakRefReference - Get a reference to the target of VD.
+ llvm::Constant *GetWeakRefReference(const ValueDecl *VD);
+
+ /// GetNonVirtualBaseClassOffset - Returns the offset from a derived class to
+ /// a class. Returns null if the offset is 0.
+ llvm::Constant *
+ GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ const CXXBaseSpecifierArray &BasePath);
+
+ /// GetStringForStringLiteral - Return the appropriate bytes for a string
+ /// literal, properly padded to match the literal type. If only the address of
+ /// a constant is needed consider using GetAddrOfConstantStringLiteral.
+ std::string GetStringForStringLiteral(const StringLiteral *E);
+
+ /// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantNSString - Return a pointer to a constant NSString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantNSString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
+ /// for the given string literal.
+ llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
+
+ /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+ /// array for the given ObjCEncodeExpr node.
+ llvm::Constant *GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
+
+ /// GetAddrOfConstantString - Returns a pointer to a character array
+  /// containing the literal. Its contents are exactly those of the given
+  /// string, i.e. it will not be null terminated automatically; see
+  /// GetAddrOfConstantCString. Note that whether the result is actually a
+  /// pointer to an LLVM constant depends on Features.WritableStrings.
+ ///
+ /// The result has pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global
+ /// (if one is created).
+ llvm::Constant *GetAddrOfConstantString(const std::string& str,
+ const char *GlobalName=0);
+
+ /// GetAddrOfConstantCString - Returns a pointer to a character array
+ /// containing the literal and a terminating '\0' character. The result has
+ /// pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global (if one is
+ /// created).
+ llvm::Constant *GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName=0);
+
+ /// GetAddrOfCXXConstructor - Return the address of the constructor of the
+ /// given type.
+ llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type);
+
+  /// GetAddrOfCXXDestructor - Return the address of the destructor of the
+ /// given type.
+ llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ /// getBuiltinLibFunction - Given a builtin id for a function like
+ /// "__builtin_fabsf", return a Function* for "fabsf".
+ llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID);
+
+ llvm::Function *getMemCpyFn(const llvm::Type *DestType,
+ const llvm::Type *SrcType,
+ const llvm::Type *SizeType);
+
+ llvm::Function *getMemMoveFn(const llvm::Type *DestType,
+ const llvm::Type *SrcType,
+ const llvm::Type *SizeType);
+
+ llvm::Function *getMemSetFn(const llvm::Type *DestType,
+ const llvm::Type *SizeType);
+
+ llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
+ unsigned NumTys = 0);
+
+ /// EmitTopLevelDecl - Emit code for a single top level declaration.
+ void EmitTopLevelDecl(Decl *D);
+
+ /// AddUsedGlobal - Add a global which should be forced to be
+ /// present in the object file; these are emitted to the llvm.used
+ /// metadata global.
+ void AddUsedGlobal(llvm::GlobalValue *GV);
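+ //
+ // Illustrative sketch only (not literal output of this module): the
+ // globals collected here end up referenced from an appending-linkage
+ // array in the "llvm.metadata" section, roughly:
+ // @llvm.used = appending global [1 x i8*]
+ // [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"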
+
+ void AddAnnotation(llvm::Constant *C) { Annotations.push_back(C); }
+
+ /// AddCXXDtorEntry - Add a destructor and object to add to the C++ global
+ /// destructor function.
+ void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object);
+
+ /// CreateRuntimeFunction - Create a new runtime function with the specified
+ /// type and name.
+ llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
+ llvm::StringRef Name);
+ /// CreateRuntimeVariable - Create a new runtime global variable with the
+ /// specified type and name.
+ llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
+ llvm::StringRef Name);
+
+ void UpdateCompletedType(const TagDecl *TD) {
+ // Make sure that this type is translated.
+ Types.UpdateCompletedType(TD);
+ }
+
+ /// EmitConstantExpr - Try to emit the given expression as a
+ /// constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitNullConstant - Return the result of value-initializing the given
+ /// type, i.e. a null expression of the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstant(QualType T);
+
+ llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA, unsigned LineNo);
+
+ llvm::Constant *EmitPointerToDataMember(const FieldDecl *FD);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified decl yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError=false);
+
+ /// SetInternalFunctionAttributes - Set the attributes on the LLVM
+ /// function for the given decl and function info. This applies
+ /// attributes necessary for handling the ABI as well as user
+ /// specified attributes like section.
+ void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+ const CGFunctionInfo &FI);
+
+ /// SetLLVMFunctionAttributes - Set the LLVM function attributes
+ /// (sext, zext, etc).
+ void SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F);
+
+ /// SetLLVMFunctionAttributesForDefinition - Set the LLVM function attributes
+ /// which only apply to a function definition.
+ void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+
+ /// ReturnTypeUsesSret - Return true iff the given type uses 'sret' when used
+ /// as a return type.
+ bool ReturnTypeUsesSret(const CGFunctionInfo &FI);
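+ //
+ // Sketch of the effect, assuming a hypothetical aggregate %struct.Big:
+ // when this returns true, the value is returned through a hidden first
+ // argument rather than in registers, e.g.
+ // define void @f(%struct.Big* sret %agg.result)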
+
+ /// ConstructAttributeList - Get the LLVM attributes and calling convention to
+ /// use for a particular function type.
+ ///
+ /// \param Info - The function type information.
+ /// \param TargetDecl - The decl these attributes are being constructed
+ /// for. If supplied the attributes applied to this decl may contribute to the
+ /// function attributes and calling convention.
+ /// \param PAL [out] - On return, the attribute list to use.
+ /// \param CallingConv [out] - On return, the LLVM calling convention to use.
+ void ConstructAttributeList(const CGFunctionInfo &Info,
+ const Decl *TargetDecl,
+ AttributeListType &PAL,
+ unsigned &CallingConv);
+
+ void getMangledName(MangleBuffer &Buffer, GlobalDecl D);
+ void getMangledName(MangleBuffer &Buffer, const NamedDecl *ND);
+ void getMangledName(MangleBuffer &Buffer, const BlockDecl *BD);
+ void getMangledCXXCtorName(MangleBuffer &Buffer,
+ const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ void getMangledCXXDtorName(MangleBuffer &Buffer,
+ const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ void EmitTentativeDefinition(const VarDecl *D);
+
+ void EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired);
+
+ enum GVALinkage {
+ GVA_Internal,
+ GVA_C99Inline,
+ GVA_CXXInline,
+ GVA_StrongExternal,
+ GVA_TemplateInstantiation,
+ GVA_ExplicitTemplateInstantiation
+ };
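+
+ // For example (roughly; the authoritative mapping is getFunctionLinkage
+ // below): a static function is GVA_Internal and lowers to LLVM internal
+ // linkage, while a C++ inline function is GVA_CXXInline and lowers to
+ // linkonce_odr so duplicate definitions merge across translation units.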
+
+ llvm::GlobalVariable::LinkageTypes
+ getFunctionLinkage(const FunctionDecl *FD);
+
+ void setFunctionLinkage(const FunctionDecl *FD, llvm::GlobalValue *V) {
+ V->setLinkage(getFunctionLinkage(FD));
+ }
+
+ /// getVTableLinkage - Return the appropriate linkage for the vtable, VTT,
+ /// and type information of the given class.
+ static llvm::GlobalVariable::LinkageTypes
+ getVTableLinkage(const CXXRecordDecl *RD);
+
+ /// GetTargetTypeStoreSize - Return the store size, in character units, of
+ /// the given LLVM type.
+ CharUnits GetTargetTypeStoreSize(const llvm::Type *Ty) const;
+
+ std::vector<const CXXRecordDecl*> DeferredVTables;
+
+private:
+ llvm::GlobalValue *GetGlobalValue(llvm::StringRef Ref);
+
+ llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
+ const llvm::Type *Ty,
+ GlobalDecl D);
+ llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
+ const llvm::PointerType *PTy,
+ const VarDecl *D);
+
+ /// SetCommonAttributes - Set attributes which are common to any
+ /// form of a global definition (alias, Objective-C method,
+ /// function, global variable).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ /// SetFunctionDefinitionAttributes - Set attributes for a global definition.
+ void SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV);
+
+ /// SetFunctionAttributes - Set function attributes for a function
+ /// declaration.
+ void SetFunctionAttributes(GlobalDecl GD,
+ llvm::Function *F,
+ bool IsIncompleteFunction);
+
+ /// EmitGlobal - Emit code for a single global function or var decl. Forward
+ /// declarations are emitted lazily.
+ void EmitGlobal(GlobalDecl D);
+
+ void EmitGlobalDefinition(GlobalDecl D);
+
+ void EmitGlobalFunctionDefinition(GlobalDecl GD);
+ void EmitGlobalVarDefinition(const VarDecl *D);
+ void EmitAliasDefinition(GlobalDecl GD);
+ void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+ void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
+
+ // C++ related functions.
+
+ bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target);
+ bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
+
+ void EmitNamespace(const NamespaceDecl *D);
+ void EmitLinkageSpec(const LinkageSpecDecl *D);
+
+ /// EmitCXXConstructors - Emit constructors (base, complete) from a
+ /// C++ constructor Decl.
+ void EmitCXXConstructors(const CXXConstructorDecl *D);
+
+ /// EmitCXXConstructor - Emit a single constructor with the given type from
+ /// a C++ constructor Decl.
+ void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
+
+ /// EmitCXXDestructors - Emit destructors (base, complete) from a
+ /// C++ destructor Decl.
+ void EmitCXXDestructors(const CXXDestructorDecl *D);
+
+ /// EmitCXXDestructor - Emit a single destructor with the given type from
+ /// a C++ destructor Decl.
+ void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+ /// EmitCXXGlobalInitFunc - Emit the function that initializes C++ globals.
+ void EmitCXXGlobalInitFunc();
+
+ /// EmitCXXGlobalDtorFunc - Emit the function that destroys C++ globals.
+ void EmitCXXGlobalDtorFunc();
+
+ void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D);
+
+ // FIXME: Hardcoding priority here is gross.
+ void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
+
+ /// EmitCtorList - Generates a global array of functions and priorities using
+ /// the given list and name. This array will have appending linkage and is
+ /// suitable for use as an LLVM constructor or destructor array.
+ void EmitCtorList(const CtorList &Fns, const char *GlobalName);
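+ //
+ // Illustrative sketch of the resulting IR (not literal output):
+ // @llvm.global_ctors = appending global [1 x { i32, void ()* }]
+ // [{ i32, void ()* } { i32 65535, void ()* @init_fn }]
+ // where 65535 is the default priority hardcoded above and @init_fn is a
+ // hypothetical initialization function.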
+
+ void EmitAnnotations(void);
+
+ /// EmitFundamentalRTTIDescriptor - Emit the RTTI descriptors for the
+ /// given type.
+ void EmitFundamentalRTTIDescriptor(QualType Type);
+
+ /// EmitFundamentalRTTIDescriptors - Emit the RTTI descriptors for the
+ /// builtin types.
+ void EmitFundamentalRTTIDescriptors();
+
+ /// EmitDeferred - Emit any needed decls for which code generation
+ /// was deferred.
+ void EmitDeferred(void);
+
+ /// EmitLLVMUsed - Emit the llvm.used metadata used to force
+ /// references to globals which may otherwise be optimized out.
+ void EmitLLVMUsed(void);
+
+ /// MayDeferGeneration - Determine if the given decl can be emitted
+ /// lazily; this is only relevant for definitions. The given decl
+ /// must be either a function or var decl.
+ bool MayDeferGeneration(const ValueDecl *D);
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
new file mode 100644
index 0000000..a46dc72
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -0,0 +1,501 @@
+//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTypes.h"
+#include "CGCall.h"
+#include "CGRecordLayout.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
+ const llvm::TargetData &TD, const ABIInfo &Info)
+ : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
+ TheABIInfo(Info) {
+}
+
+CodeGenTypes::~CodeGenTypes() {
+ for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
+ I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
+ I != E; ++I)
+ delete I->second;
+
+ for (llvm::FoldingSet<CGFunctionInfo>::iterator
+ I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
+ delete &*I++;
+}
+
+/// ConvertType - Convert the specified type to its LLVM form.
+const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+ llvm::PATypeHolder Result = ConvertTypeRecursive(T);
+
+ // Any pointers that were converted deferred evaluation of their pointee
+ // type, creating an opaque type instead. This avoids problems with
+ // circular types. Loop through all these deferred pointees, if any, and
+ // resolve them now.
+ while (!PointersToResolve.empty()) {
+ std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();
+
+ // We can handle bare pointers here because we know that the only pointers
+ // to the opaque type are P.second and those from other types. Refining the
+ // opaque type away will invalidate P.second, but we don't mind :).
+ const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
+ P.second->refineAbstractTypeTo(NT);
+ }
+
+ return Result;
+}
+
+const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ // See if type is already cached.
+ llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
+ I = TypeCache.find(T.getTypePtr());
+ // If the type is found in the map and is not a definition for an opaque
+ // placeholder type, use it. Otherwise, convert type T.
+ if (I != TypeCache.end())
+ return I->second.get();
+
+ const llvm::Type *ResultType = ConvertNewType(T);
+ TypeCache.insert(std::make_pair(T.getTypePtr(),
+ llvm::PATypeHolder(ResultType)));
+ return ResultType;
+}
+
+const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
+ const llvm::Type *ResultType = ConvertTypeRecursive(T);
+ if (ResultType->isIntegerTy(1))
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
+ // FIXME: Should assert that the llvm type and AST type has the same size.
+ return ResultType;
+}
+
+/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+/// ConvertType in that it is used to convert to the memory representation for
+/// a type. For example, the scalar representation for _Bool is i1, but the
+/// memory representation is usually i8 or i32, depending on the target.
+const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
+ const llvm::Type *R = ConvertType(T);
+
+ // If this is a non-bool type, don't map it.
+ if (!R->isIntegerTy(1))
+ return R;
+
+ // Otherwise, return an integer of the target-specified size.
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
+
+}
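+// For example, given a 'bool' value, ConvertType yields i1 (the scalar form
+// used for comparisons and branches), while ConvertTypeForMem yields an
+// integer of Context.getTypeSize(bool) bits -- typically i8 -- so loads and
+// stores operate on a whole addressable unit.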
+
+// Code to verify a given function type is complete, i.e. the return type
+// and all of the argument types are complete.
+static const TagType *VerifyFuncTypeComplete(const Type* T) {
+ const FunctionType *FT = cast<FunctionType>(T);
+ if (const TagType* TT = FT->getResultType()->getAs<TagType>())
+ if (!TT->getDecl()->isDefinition())
+ return TT;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
+ for (unsigned i = 0; i < FPT->getNumArgs(); i++)
+ if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>())
+ if (!TT->getDecl()->isDefinition())
+ return TT;
+ return 0;
+}
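+// For example, given 'struct S; S getS(void);', the type of getS is not yet
+// complete: its return type is an incomplete tag, so this returns the
+// TagType for S and callers fall back to an opaque type until S is defined.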
+
+/// UpdateCompletedType - When we find the full definition for a TagDecl,
+/// replace the 'opaque' type we previously made for it if applicable.
+void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ const Type *Key = Context.getTagDeclType(TD).getTypePtr();
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ TagDeclTypes.find(Key);
+ if (TDTI == TagDeclTypes.end()) return;
+
+ // Remember the opaque LLVM type for this tagdecl.
+ llvm::PATypeHolder OpaqueHolder = TDTI->second;
+ assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
+ "Updating compilation of an already non-opaque type?");
+
+ // Remove it from TagDeclTypes so that it will be regenerated.
+ TagDeclTypes.erase(TDTI);
+
+ // Generate the new type.
+ const llvm::Type *NT = ConvertTagDeclType(TD);
+
+ // Refine the old opaque type to its new definition.
+ cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
+
+ // Since we just completed a tag type, check to see if any function types
+ // were completed along with the tag type.
+ // FIXME: This is very inefficient; if we track which function types depend
+ // on which tag types, though, it should be reasonably efficient.
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
+ for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
+ if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
+ // This function type still depends on an incomplete tag type; make sure
+ // that tag type has an associated opaque type.
+ ConvertTagDeclType(TT->getDecl());
+ } else {
+ // This function no longer depends on an incomplete tag type; create the
+ // function type, and refine the opaque type to the new function type.
+ llvm::PATypeHolder OpaqueHolder = i->second;
+ const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
+ cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
+ FunctionTypes.erase(i);
+ }
+ }
+}
+
+static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
+ const llvm::fltSemantics &format) {
+ if (&format == &llvm::APFloat::IEEEsingle)
+ return llvm::Type::getFloatTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEdouble)
+ return llvm::Type::getDoubleTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEquad)
+ return llvm::Type::getFP128Ty(VMContext);
+ if (&format == &llvm::APFloat::PPCDoubleDouble)
+ return llvm::Type::getPPC_FP128Ty(VMContext);
+ if (&format == &llvm::APFloat::x87DoubleExtended)
+ return llvm::Type::getX86_FP80Ty(VMContext);
+ assert(0 && "Unknown float format!");
+ return 0;
+}
+
+const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
+ const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();
+
+ switch (Ty.getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical or dependent types aren't possible.");
+ break;
+
+ case Type::Builtin: {
+ switch (cast<BuiltinType>(Ty).getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ // LLVM void type can only be used as the result of a function call. Just
+ // map to the same as char.
+ return llvm::IntegerType::get(getLLVMContext(), 8);
+
+ case BuiltinType::Bool:
+ // Note that we always return bool as i1 for use as a scalar type.
+ return llvm::Type::getInt1Ty(getLLVMContext());
+
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::WChar:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ return llvm::IntegerType::get(getLLVMContext(),
+ static_cast<unsigned>(Context.getTypeSize(T)));
+
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ return getTypeForFormat(getLLVMContext(),
+ Context.getFloatTypeSemantics(T));
+
+ case BuiltinType::NullPtr: {
+ // Model std::nullptr_t as i8*
+ const llvm::Type *Ty = llvm::IntegerType::get(getLLVMContext(), 8);
+ return llvm::PointerType::getUnqual(Ty);
+ }
+
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ return llvm::IntegerType::get(getLLVMContext(), 128);
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::UndeducedAuto:
+ assert(0 && "Unexpected builtin type!");
+ break;
+ }
+ assert(0 && "Unknown builtin type!");
+ break;
+ }
+ case Type::Complex: {
+ const llvm::Type *EltTy =
+ ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
+ return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType &RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy.getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+ PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ }
+ case Type::Pointer: {
+ const PointerType &PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy.getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+ PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ }
+
+ case Type::VariableArray: {
+ const VariableArrayType &A = cast<VariableArrayType>(Ty);
+ assert(A.getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // VLAs resolve to the innermost element type; this matches
+ // the return of alloca, and there isn't any obviously better choice.
+ return ConvertTypeForMemRecursive(A.getElementType());
+ }
+ case Type::IncompleteArray: {
+ const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
+ assert(A.getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // int X[] -> [0 x int]
+ return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), 0);
+ }
+ case Type::ConstantArray: {
+ const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
+ const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
+ return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType &VT = cast<VectorType>(Ty);
+ return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
+ VT.getNumElements());
+ }
+ case Type::FunctionNoProto:
+ case Type::FunctionProto: {
+ // First, check whether we can build the full function type.
+ if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
+ // This function's type depends on an incomplete tag type; make sure
+ // we have an opaque type corresponding to the tag type.
+ ConvertTagDeclType(TT->getDecl());
+ // Create an opaque type for this function type, save it, and return it.
+ llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
+ FunctionTypes.insert(std::make_pair(&Ty, ResultType));
+ return ResultType;
+ }
+ // The function type can be built; call the appropriate routines to
+ // build it.
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
+ return GetFunctionType(getFunctionInfo(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT,0))),
+ FPT->isVariadic());
+
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+ return GetFunctionType(getFunctionInfo(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT,0))),
+ true);
+ }
+
+ case Type::ObjCObject:
+ return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType());
+
+ case Type::ObjCInterface: {
+ // Objective-C interfaces are always opaque (outside of the
+ // runtime, which can do whatever it likes); we never refine
+ // these.
+ const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
+ if (!T)
+ T = llvm::OpaqueType::get(getLLVMContext());
+ return T;
+ }
+
+ case Type::ObjCObjectPointer: {
+ // Protocol qualifications do not influence the LLVM type, we just return a
+ // pointer to the underlying interface type. We don't need to worry about
+ // recursive conversion.
+ const llvm::Type *T =
+ ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
+ return llvm::PointerType::getUnqual(T);
+ }
+
+ case Type::Record:
+ case Type::Enum: {
+ const TagDecl *TD = cast<TagType>(Ty).getDecl();
+ const llvm::Type *Res = ConvertTagDeclType(TD);
+
+ std::string TypeName(TD->getKindName());
+ TypeName += '.';
+
+ // Name the codegen type after the typedef name
+ // if there is no tag type name available
+ if (TD->getIdentifier())
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ TypeName += TD->getDeclContext() ? TD->getQualifiedNameAsString() :
+ TD->getNameAsString();
+ else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ TypeName += TdT->getDecl()->getDeclContext() ?
+ TdT->getDecl()->getQualifiedNameAsString() :
+ TdT->getDecl()->getNameAsString();
+ else
+ TypeName += "anon";
+
+ TheModule.addTypeName(TypeName, Res);
+ return Res;
+ }
+
+ case Type::BlockPointer: {
+ const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+ PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
+ return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
+ }
+
+ case Type::MemberPointer: {
+ // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
+ // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
+ // If we ever want to support other ABIs this needs to be abstracted.
+
+ QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
+ const llvm::Type *PtrDiffTy =
+ ConvertTypeRecursive(Context.getPointerDiffType());
+ if (ETy->isFunctionType())
+ return llvm::StructType::get(TheModule.getContext(), PtrDiffTy, PtrDiffTy,
+ NULL);
+ return PtrDiffTy;
+ }
+ }
+
+ // FIXME: implement.
+ return llvm::OpaqueType::get(getLLVMContext());
+}
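+
+// A few concrete lowerings performed above (illustrative, assuming a typical
+// 64-bit target where int is 32 bits and ptrdiff_t is 64 bits):
+// _Complex double -> { double, double }
+// int [10] -> [10 x i32]
+// int (S::*)() -> { i64, i64 } (Itanium ptr/adjustment pair)
+// int S::* -> i64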
+
+/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+/// enum.
+const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
+ // TagDecls are not necessarily unique; instead, use the (clang)
+ // type connected to the decl.
+ const Type *Key =
+ Context.getTagDeclType(TD).getTypePtr();
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ TagDeclTypes.find(Key);
+
+ // If we've already compiled this tag type, use the previous definition.
+ if (TDTI != TagDeclTypes.end())
+ return TDTI->second;
+
+ // If this is still a forward declaration, just define an opaque
+ // type to use for this tagged decl.
+ if (!TD->isDefinition()) {
+ llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
+ TagDeclTypes.insert(std::make_pair(Key, ResultType));
+ return ResultType;
+ }
+
+ // Okay, this is a definition of a type. Compile the implementation now.
+
+ if (TD->isEnum()) // Don't bother storing enums in TagDeclTypes.
+ return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
+
+ // This decl could well be recursive. In this case, insert an opaque
+ // definition of this type, which the recursive uses will get. We will then
+ // refine this opaque version later.
+
+ // Create new OpaqueType now for later use in case this is a recursive
+ // type. This will later be refined to the actual type.
+ llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
+ TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
+
+ const RecordDecl *RD = cast<const RecordDecl>(TD);
+
+ // Force conversion of non-virtual base classes recursively.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+ e = RD->bases_end(); i != e; ++i) {
+ if (!i->isVirtual()) {
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+ ConvertTagDeclType(Base);
+ }
+ }
+ }
+
+ // Layout fields.
+ CGRecordLayout *Layout = ComputeRecordLayout(RD);
+
+ CGRecordLayouts[Key] = Layout;
+ const llvm::Type *ResultType = Layout->getLLVMType();
+
+ // Refine our Opaque type to ResultType. This can invalidate ResultType, so
+ // make sure to read the result out of the holder.
+ cast<llvm::OpaqueType>(ResultHolder.get())
+ ->refineAbstractTypeTo(ResultType);
+
+ return ResultHolder.get();
+}
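+
+// For example, the self-referential 'struct Node { struct Node *next; };'
+// first gets an opaque placeholder; the 'next' field then points at that
+// placeholder, and the final refineAbstractTypeTo call resolves it to
+// %struct.Node = type { %struct.Node* }.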
+
+/// getCGRecordLayout - Return record layout info for the given record decl.
+const CGRecordLayout &
+CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const {
+ const Type *Key = Context.getTagDeclType(TD).getTypePtr();
+ const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
+ assert(Layout && "Unable to find record layout information for type");
+ return *Layout;
+}
+
+bool CodeGenTypes::ContainsPointerToDataMember(QualType T) {
+ // No need to check for member pointers when not compiling C++.
+ if (!Context.getLangOptions().CPlusPlus)
+ return false;
+
+ T = Context.getBaseElementType(T);
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ return ContainsPointerToDataMember(RD);
+ }
+
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+ return !MPT->getPointeeType()->isFunctionType();
+
+ return false;
+}
+
+bool CodeGenTypes::ContainsPointerToDataMember(const CXXRecordDecl *RD) {
+
+ // FIXME: It would be better if there was a way to explicitly compute the
+ // record layout instead of converting to a type.
+ ConvertTagDeclType(RD);
+
+ const CGRecordLayout &Layout = getCGRecordLayout(RD);
+ return Layout.containsPointerToDataMember();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 0000000..fc28c3a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,202 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTYPES_H
+#define CLANG_CODEGEN_CODEGENTYPES_H
+
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+
+#include "CGCall.h"
+#include "GlobalDecl.h"
+
+namespace llvm {
+ class FunctionType;
+ class Module;
+ class OpaqueType;
+ class PATypeHolder;
+ class TargetData;
+ class Type;
+ class LLVMContext;
+}
+
+namespace clang {
+ class ABIInfo;
+ class ASTContext;
+ template <typename> class CanQual;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class FieldDecl;
+ class FunctionProtoType;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class PointerType;
+ class QualType;
+ class RecordDecl;
+ class TagDecl;
+ class TargetInfo;
+ class Type;
+ typedef CanQual<Type> CanQualType;
+
+namespace CodeGen {
+ class CGRecordLayout;
+
+/// CodeGenTypes - This class organizes the cross-module state that is used
+/// while lowering AST types to LLVM types.
+class CodeGenTypes {
+ ASTContext &Context;
+ const TargetInfo &Target;
+ llvm::Module& TheModule;
+ const llvm::TargetData& TheTargetData;
+ const ABIInfo& TheABIInfo;
+
+ llvm::SmallVector<std::pair<QualType,
+ llvm::OpaqueType *>, 8> PointersToResolve;
+
+ llvm::DenseMap<const Type*, llvm::PATypeHolder> TagDeclTypes;
+
+ llvm::DenseMap<const Type*, llvm::PATypeHolder> FunctionTypes;
+
+ /// The opaque type map for Objective-C interfaces. All direct
+ /// manipulation is done by the runtime interfaces, which are
+ /// responsible for coercing to the appropriate type; these opaque
+ /// types are never refined.
+ llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes;
+
+ /// CGRecordLayouts - This maps clang record types to their corresponding
+ /// LLVM record layout info.
+ llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+
+ /// FunctionInfos - Hold memoized CGFunctionInfo results.
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+
+private:
+ /// TypeCache - This map caches converted llvm::Types (through PATypeHolder),
+ /// keyed by the corresponding clang::Type. llvm::PATypeHolder is used
+ /// instead of llvm::Type because it allows us to bypass potential
+ /// dangling type pointers due to type refinement on the llvm side.
+ llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
+
+ /// ConvertNewType - Convert type T into a llvm::Type. Do not use this
+ /// method directly because it does not do any type caching; it exists
+ /// only for use by ConvertType(), which is the preferred interface for
+ /// converting type T into a llvm::Type.
+ const llvm::Type *ConvertNewType(QualType T);
+public:
+ CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
+ const ABIInfo &Info);
+ ~CodeGenTypes();
+
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const TargetInfo &getTarget() const { return Target; }
+ ASTContext &getContext() const { return Context; }
+ const ABIInfo &getABIInfo() const { return TheABIInfo; }
+ llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
+
+ /// ConvertType - Convert type T into a llvm::Type.
+ const llvm::Type *ConvertType(QualType T);
+ const llvm::Type *ConvertTypeRecursive(QualType T);
+
+ /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+ /// ConvertType in that it is used to convert to the memory representation for
+ /// a type. For example, the scalar representation for _Bool is i1, but the
+ /// memory representation is usually i8 or i32, depending on the target.
+ const llvm::Type *ConvertTypeForMem(QualType T);
+ const llvm::Type *ConvertTypeForMemRecursive(QualType T);
+
+ /// GetFunctionType - Get the LLVM function type for \arg Info.
+ const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
+ bool IsVariadic);
+
+ const llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+
+
+ /// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
+ /// given a CXXMethodDecl. If the method has an incomplete return type,
+ /// and/or incomplete argument types, this will return the opaque type.
+ const llvm::Type *GetFunctionTypeForVTable(const CXXMethodDecl *MD);
+
+ const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const;
+
+ /// UpdateCompletedType - When we find the full definition for a TagDecl,
+ /// replace the 'opaque' type we previously made for it if applicable.
+ void UpdateCompletedType(const TagDecl *TD);
+
+ /// getFunctionInfo - Get the function info for the specified function decl.
+ const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
+
+ const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
+ const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
+ const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &getFunctionInfo(const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ const CGFunctionInfo &getFunctionInfo(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ const CGFunctionInfo &getFunctionInfo(const CallArgList &Args,
+ const FunctionType *Ty) {
+ return getFunctionInfo(Ty->getResultType(), Args,
+ Ty->getExtInfo());
+ }
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
+
+ /// getFunctionInfo - Get the function info for a member function.
+ const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP);
+
+ /// getFunctionInfo - Get the function info for a function described by a
+ /// return type and argument types. If the calling convention is not
+ /// specified, the "C" calling convention will be used.
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const CallArgList &Args,
+ const FunctionType::ExtInfo &Info);
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info);
+
+ /// Retrieves the ABI information for the given function signature.
+ ///
+ /// \param ArgTys - must all actually be canonical as params
+ const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ const FunctionType::ExtInfo &Info);
+
+ /// \brief Compute a new LLVM record layout object for the given record.
+ CGRecordLayout *ComputeRecordLayout(const RecordDecl *D);
+
+public: // These are internal details of CGT that shouldn't be used externally.
+ /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+ /// enum.
+ const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
+
+ /// GetExpandedTypes - Expand the type \arg Ty into the LLVM argument types
+ /// it would be passed as, appending them to the provided vector \arg ArgTys.
+ /// See ABIArgInfo::Expand.
+ void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys);
+
+ /// ContainsPointerToDataMember - Return whether the given type contains a
+ /// pointer to a data member.
+ bool ContainsPointerToDataMember(QualType T);
+
+ /// ContainsPointerToDataMember - Return whether the record decl contains a
+ /// pointer to a data member.
+ bool ContainsPointerToDataMember(const CXXRecordDecl *RD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
new file mode 100644
index 0000000..b8a98d7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
@@ -0,0 +1,113 @@
+//===--- GlobalDecl.h - Global declaration holder ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A GlobalDecl can hold either a regular variable/function or a C++ ctor/dtor
+// together with its type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_GLOBALDECL_H
+#define CLANG_CODEGEN_GLOBALDECL_H
+
+#include "CGCXX.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+
+namespace clang {
+
+namespace CodeGen {
+
+/// GlobalDecl - represents a global declaration. This can be either a
+/// CXXConstructorDecl and the constructor type (Base, Complete), a
+/// CXXDestructorDecl and the destructor type (Base, Complete), or a
+/// VarDecl, a FunctionDecl or a BlockDecl.
+class GlobalDecl {
+ llvm::PointerIntPair<const Decl*, 2> Value;
+
+ void Init(const Decl *D) {
+ assert(!isa<CXXConstructorDecl>(D) && "Use other ctor with ctor decls!");
+ assert(!isa<CXXDestructorDecl>(D) && "Use other ctor with dtor decls!");
+
+ Value.setPointer(D);
+ }
+
+public:
+ GlobalDecl() {}
+
+ GlobalDecl(const VarDecl *D) { Init(D);}
+ GlobalDecl(const FunctionDecl *D) { Init(D); }
+ GlobalDecl(const BlockDecl *D) { Init(D); }
+ GlobalDecl(const ObjCMethodDecl *D) { Init(D); }
+
+ GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
+ : Value(D, Type) {}
+ GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
+ : Value(D, Type) {}
+
+ const Decl *getDecl() const { return Value.getPointer(); }
+
+ CXXCtorType getCtorType() const {
+ assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
+ return static_cast<CXXCtorType>(Value.getInt());
+ }
+
+ CXXDtorType getDtorType() const {
+ assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
+ return static_cast<CXXDtorType>(Value.getInt());
+ }
+
+ friend bool operator==(const GlobalDecl &LHS, const GlobalDecl &RHS) {
+ return LHS.Value == RHS.Value;
+ }
+
+ void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
+
+ static GlobalDecl getFromOpaquePtr(void *P) {
+ GlobalDecl GD;
+ GD.Value.setFromOpaqueValue(P);
+ return GD;
+ }
+};
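+
+// Usage sketch: plain functions and variables are wrapped directly, while
+// ctors/dtors carry their variant in the low bits of the pointer, e.g.
+// (FD and CD being hypothetical FunctionDecl/CXXConstructorDecl pointers):
+// GlobalDecl GD1(FD);
+// GlobalDecl GD2(CD, Ctor_Complete); // complete-object constructor
+// GD2.getCtorType(); // == Ctor_Complete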
+
+} // end namespace CodeGen
+} // end namespace clang
+
+namespace llvm {
+ template<class> struct DenseMapInfo;
+
+ template<> struct DenseMapInfo<clang::CodeGen::GlobalDecl> {
+ static inline clang::CodeGen::GlobalDecl getEmptyKey() {
+ return clang::CodeGen::GlobalDecl();
+ }
+
+ static inline clang::CodeGen::GlobalDecl getTombstoneKey() {
+ return clang::CodeGen::GlobalDecl::
+ getFromOpaquePtr(reinterpret_cast<void*>(-1));
+ }
+
+ static unsigned getHashValue(clang::CodeGen::GlobalDecl GD) {
+ return DenseMapInfo<void*>::getHashValue(GD.getAsOpaquePtr());
+ }
+
+ static bool isEqual(clang::CodeGen::GlobalDecl LHS,
+ clang::CodeGen::GlobalDecl RHS) {
+ return LHS == RHS;
+ }
+
+ };
+
+ // GlobalDecl isn't *technically* a POD type. However, its copy constructor,
+ // copy assignment operator, and destructor are all trivial.
+ template <>
+ struct isPodLike<clang::CodeGen::GlobalDecl> {
+ static const bool value = true;
+ };
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
new file mode 100644
index 0000000..98db75e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -0,0 +1,39 @@
+//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Itanium C++ ABI. The class
+// in this file generates structures that follow the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+
+using namespace clang;
+
+namespace {
+class ItaniumCXXABI : public CodeGen::CXXABI {
+ CodeGen::MangleContext MangleCtx;
+public:
+ ItaniumCXXABI(CodeGen::CodeGenModule &CGM) :
+ MangleCtx(CGM.getContext(), CGM.getDiags()) { }
+
+ CodeGen::MangleContext &getMangleContext() {
+ return MangleCtx;
+ }
+};
+}
+
+CodeGen::CXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
+ return new ItaniumCXXABI(CGM);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Makefile b/contrib/llvm/tools/clang/lib/CodeGen/Makefile
new file mode 100644
index 0000000..3cea6bb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Makefile
@@ -0,0 +1,25 @@
+##===- clang/lib/CodeGen/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST -> LLVM code generation library for the
+# C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangCodeGen
+BUILD_ARCHIVE = 1
+
+CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+ifdef CLANG_VENDOR
+CPP.Flags += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
+endif
+
+include $(LEVEL)/Makefile.common
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
new file mode 100644
index 0000000..6c2a648
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
@@ -0,0 +1,2201 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "CGVTables.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+using namespace CodeGen;
+
+MiscNameMangler::MiscNameMangler(MangleContext &C,
+ llvm::SmallVectorImpl<char> &Res)
+ : Context(C), Out(Res) { }
+
+void MiscNameMangler::mangleBlock(const BlockDecl *BD) {
+ // Mangle the context of the block.
+ // FIXME: We currently mimic GCC's mangling scheme, which leaves much to be
+ // desired. Come up with a better mangling scheme.
+ const DeclContext *DC = BD->getDeclContext();
+ while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
+ DC = DC->getParent();
+ if (DC->isFunctionOrMethod()) {
+ Out << "__";
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (IdentifierInfo *II = ND->getIdentifier())
+ Out << II->getName();
+ else {
+ // FIXME: We were doing a mangleUnqualifiedName() before, but that's
+ // a private member of a class that will soon itself be private to the
+ // Itanium C++ ABI object. What should we do now? Right now, I'm just
+ // calling the mangleName() method on the MangleContext; is there a
+ // better way?
+ llvm::SmallString<64> Buffer;
+ Context.mangleName(ND, Buffer);
+ Out << Buffer;
+ }
+ }
+ Out << "_block_invoke_" << Context.getBlockId(BD, true);
+ } else {
+ Out << "__block_global_" << Context.getBlockId(BD, false);
+ }
+}
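+
+// For illustration: a block defined inside a function 'main' is mangled along
+// the lines of __main_block_invoke_1 (the trailing number is the per-function
+// block id), while a block at file scope becomes __block_global_<N>.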
+
+void MiscNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ llvm::SmallString<64> Name;
+ llvm::raw_svector_ostream OS(Name);
+
+ const ObjCContainerDecl *CD =
+ dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+ OS << '(' << CID->getNameAsString() << ')'; // category name, not pointer
+ OS << ' ' << MD->getSelector().getAsString() << ']';
+
+ Out << OS.str().size() << OS.str();
+}
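+
+// For example, -[MyClass doSomething:] is emitted length-prefixed as
+// "23-[MyClass doSomething:]", 23 being the length of the bracketed name,
+// following the <source-name> convention.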
+
+namespace {
+
+static const DeclContext *GetLocalClassFunctionDeclContext(
+ const DeclContext *DC) {
+ if (isa<CXXRecordDecl>(DC)) {
+ while (!DC->isNamespace() && !DC->isTranslationUnit() &&
+ !isa<FunctionDecl>(DC))
+ DC = DC->getParent();
+ if (isa<FunctionDecl>(DC))
+ return DC;
+ }
+ return 0;
+}
+
+static const CXXMethodDecl *getStructor(const CXXMethodDecl *MD) {
+ assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
+ "Passed in decl is not a ctor or dtor!");
+
+ if (const TemplateDecl *TD = MD->getPrimaryTemplate()) {
+ MD = cast<CXXMethodDecl>(TD->getTemplatedDecl());
+
+ assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
+ "Templated decl is not a ctor or dtor!");
+ }
+
+ return MD;
+}
+
+static const unsigned UnknownArity = ~0U;
+
+/// CXXNameMangler - Manage the mangling of a single name.
+class CXXNameMangler {
+ MangleContext &Context;
+ llvm::raw_svector_ostream Out;
+
+ const CXXMethodDecl *Structor;
+ unsigned StructorType;
+
+ llvm::DenseMap<uintptr_t, unsigned> Substitutions;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
+ : Context(C), Out(Res), Structor(0), StructorType(0) { }
+ CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
+ const CXXConstructorDecl *D, CXXCtorType Type)
+ : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { }
+ CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
+ const CXXDestructorDecl *D, CXXDtorType Type)
+ : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { }
+
+#if MANGLE_CHECKER
+ ~CXXNameMangler() {
+ if (Out.str()[0] == '\01')
+ return;
+
+ int status = 0;
+ char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
+ assert(status == 0 && "Could not demangle mangled name!");
+ free(result);
+ }
+#endif
+ llvm::raw_svector_ostream &getStream() { return Out; }
+
+ void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
+ void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
+ void mangleNumber(int64_t Number);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleName(const NamedDecl *ND);
+ void mangleType(QualType T);
+ void mangleNameOrStandardSubstitution(const NamedDecl *ND);
+
+private:
+ bool mangleSubstitution(const NamedDecl *ND);
+ bool mangleSubstitution(QualType T);
+ bool mangleSubstitution(TemplateName Template);
+ bool mangleSubstitution(uintptr_t Ptr);
+
+ bool mangleStandardSubstitution(const NamedDecl *ND);
+
+ void addSubstitution(const NamedDecl *ND) {
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+
+ addSubstitution(reinterpret_cast<uintptr_t>(ND));
+ }
+ void addSubstitution(QualType T);
+ void addSubstitution(TemplateName Template);
+ void addSubstitution(uintptr_t Ptr);
+
+ void mangleUnresolvedScope(NestedNameSpecifier *Qualifier);
+ void mangleUnresolvedName(NestedNameSpecifier *Qualifier,
+ DeclarationName Name,
+ unsigned KnownArity = UnknownArity);
+
+ void mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
+ unsigned KnownArity);
+ void mangleUnscopedName(const NamedDecl *ND);
+ void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void mangleUnscopedTemplateName(TemplateName);
+ void mangleSourceName(const IdentifierInfo *II);
+ void mangleLocalName(const NamedDecl *ND);
+ void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
+ bool NoFunction=false);
+ void mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void manglePrefix(const DeclContext *DC, bool NoFunction=false);
+ void mangleTemplatePrefix(const TemplateDecl *ND);
+ void mangleTemplatePrefix(TemplateName Template);
+ void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
+ void mangleQualifiers(Qualifiers Quals);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType);
+
+ void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
+ void mangleMemberExpr(const Expr *Base, bool IsArrow,
+ NestedNameSpecifier *Qualifier,
+ DeclarationName Name,
+ unsigned KnownArity);
+ void mangleCalledExpression(const Expr *E, unsigned KnownArity);
+ void mangleExpression(const Expr *E);
+ void mangleCXXCtorType(CXXCtorType T);
+ void mangleCXXDtorType(CXXDtorType T);
+
+ void mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL);
+ void mangleTemplateArg(const NamedDecl *P, const TemplateArgument &A);
+
+ void mangleTemplateParameter(unsigned Index);
+};
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+ // Clang's "overloadable" attribute extension to C/C++ always implies name
+ // mangling, as does a C++ member function or a function whose name is not
+ // a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOptions().CPlusPlus)
+ return false;
+
+ // Variables at global scope with non-internal linkage are not mangled
+ if (!FD) {
+ const DeclContext *DC = D->getDeclContext();
+ // Check for extern variable declared locally.
+ if (isa<FunctionDecl>(DC) && D->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+ if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
+ return false;
+ }
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
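+
+// For example: in C, 'void f(void)' keeps the name "f"; in C++ (outside an
+// extern "C" block) the same declaration mangles to _Z1fv, while main and
+// functions in C linkage specifications are left untouched.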
+
+void CXXNameMangler::mangle(const NamedDecl *D, llvm::StringRef Prefix) {
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= _Z <encoding>
+ // ::= <data name>
+ // ::= <special-name>
+ Out << Prefix;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleName(VD);
+ else
+ mangleName(cast<FieldDecl>(D));
+}
+
+void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <encoding> ::= <function name> <bare-function-type>
+ mangleName(FD);
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // Whether the mangling of a function type includes the return type depends on
+ // the context and the nature of the function. The rules for deciding whether
+ // the return type is included are:
+ //
+ // 1. Template functions (names or types) have return types encoded, with
+ // the exceptions listed below.
+ // 2. Function types not appearing as part of a function name mangling,
+ // e.g. parameters, pointer types, etc., have return type encoded, with the
+ // exceptions listed below.
+ // 3. Non-template function names do not have return types encoded.
+ //
+ // The exceptions mentioned in (1) and (2) above, for which the return type is
+ // never included, are
+ // 1. Constructors.
+ // 2. Destructors.
+ // 3. Conversion operator functions, e.g. operator int.
+ bool MangleReturnType = false;
+ if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
+ if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
+ isa<CXXConversionDecl>(FD)))
+ MangleReturnType = true;
+
+ // Mangle the type of the primary template.
+ FD = PrimaryTemplate->getTemplatedDecl();
+ }
+
+ // Do the canonicalization out here because parameter types can
+ // undergo additional canonicalization (e.g. array decay).
+ FunctionType *FT = cast<FunctionType>(Context.getASTContext()
+ .getCanonicalType(FD->getType()));
+
+ mangleBareFunctionType(FT, MangleReturnType);
+}
+
+/// isStd - Return whether a given namespace is the 'std' namespace.
+static bool isStd(const NamespaceDecl *NS) {
+ const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+ return II && II->isStr("std");
+}
+
+static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
+ while (isa<LinkageSpecDecl>(DC)) {
+ DC = DC->getParent();
+ }
+
+ return DC;
+}
+
+// isStdNamespace - Return whether a given decl context is a toplevel 'std'
+// namespace.
+static bool isStdNamespace(const DeclContext *DC) {
+ if (!DC->isNamespace())
+ return false;
+
+ if (!IgnoreLinkageSpecDecls(DC->getParent())->isTranslationUnit())
+ return false;
+
+ return isStd(cast<NamespaceDecl>(DC));
+}
+
+static const TemplateDecl *
+isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+ // Check if we have a function template.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)){
+ if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
+ TemplateArgs = FD->getTemplateSpecializationArgs();
+ return TD;
+ }
+ }
+
+ // Check if we have a class template.
+ if (const ClassTemplateSpecializationDecl *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
+ return 0;
+}
+
+void CXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <nested-name>
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name>
+ //
+ const DeclContext *DC = ND->getDeclContext();
+
+ if (GetLocalClassFunctionDeclContext(DC)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleUnscopedTemplateName(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ return;
+ }
+
+ mangleUnscopedName(ND);
+ return;
+ }
+
+ if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ mangleNestedName(ND, DC);
+}
+void CXXNameMangler::mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ const DeclContext *DC = IgnoreLinkageSpecDecls(TD->getDeclContext());
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ mangleUnscopedTemplateName(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+ } else {
+ mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
+ }
+}
+
+void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
+ // <unscoped-name> ::= <unqualified-name>
+ // ::= St <unqualified-name> # ::std::
+ if (isStdNamespace(ND->getDeclContext()))
+ Out << "St";
+
+ mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ return;
+ }
+
+ mangleUnscopedName(ND->getTemplatedDecl());
+ addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleUnscopedTemplateName(TD);
+
+ if (mangleSubstitution(Template))
+ return;
+
+ // FIXME: How to cope with operators here?
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Not a dependent template name?");
+ if (!Dependent->isIdentifier()) {
+ // FIXME: We can't possibly know the arity of the operator here!
+ Diagnostic &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot mangle dependent operator name");
+ Diags.Report(FullSourceLoc(), DiagID);
+ return;
+ }
+
+ mangleSourceName(Dependent->getIdentifier());
+ addSubstitution(Template);
+}
+
+void CXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [n] <non-negative decimal integer>
+ if (Number < 0) {
+ Out << 'n';
+ Number = -Number;
+ }
+
+ Out << Number;
+}
+
+void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
+ // <call-offset> ::= h <nv-offset> _
+ // ::= v <v-offset> _
+ // <nv-offset> ::= <offset number> # non-virtual base override
+ // <v-offset> ::= <offset number> _ <virtual offset number>
+ // # virtual base override, with vcall offset
+ if (!Virtual) {
+ Out << 'h';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ return;
+ }
+
+ Out << 'v';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ mangleNumber(Virtual);
+ Out << '_';
+}
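+
+// For illustration (hypothetical offsets): a non-virtual 'this' adjustment
+// of -8 encodes as "hn8_"; an adjustment of -8 through a virtual base with
+// vcall offset -24 encodes as "vn8_n24_".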
+
+void CXXNameMangler::mangleUnresolvedScope(NestedNameSpecifier *Qualifier) {
+ Qualifier = getASTContext().getCanonicalNestedNameSpecifier(Qualifier);
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ // nothing
+ break;
+ case NestedNameSpecifier::Namespace:
+ mangleName(Qualifier->getAsNamespace());
+ break;
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const Type *QTy = Qualifier->getAsType();
+
+ if (const TemplateSpecializationType *TST =
+ dyn_cast<TemplateSpecializationType>(QTy)) {
+ if (!mangleSubstitution(QualType(TST, 0))) {
+ mangleTemplatePrefix(TST->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
+ TST->getNumArgs());
+ addSubstitution(QualType(TST, 0));
+ }
+ } else {
+ // We use the QualType mangle type variant here because it handles
+ // substitutions.
+ mangleType(QualType(QTy, 0));
+ }
+ }
+ break;
+ case NestedNameSpecifier::Identifier:
+ // Member expressions can have these without prefixes.
+ if (Qualifier->getPrefix())
+ mangleUnresolvedScope(Qualifier->getPrefix());
+ mangleSourceName(Qualifier->getAsIdentifier());
+ break;
+ }
+}
+
+/// Mangles a name which was not resolved to a specific entity.
+void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *Qualifier,
+ DeclarationName Name,
+ unsigned KnownArity) {
+ if (Qualifier)
+ mangleUnresolvedScope(Qualifier);
+ // FIXME: ambiguity of unqualified lookup with ::
+
+ mangleUnqualifiedName(0, Name, KnownArity);
+}
+
+void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name,
+ unsigned KnownArity) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ // We must avoid conflicts between internally- and externally-
+ // linked variable declaration names in the same TU.
+ // This naming convention is the same as that followed by GCC, though it
+ // shouldn't actually matter.
+ if (ND && isa<VarDecl>(ND) && ND->getLinkage() == InternalLinkage &&
+ ND->getDeclContext()->isFileContext())
+ Out << 'L';
+
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ // This is how gcc mangles these names.
+ Out << "12_GLOBAL__N_1";
+ break;
+ }
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // Get a unique id for the anonymous struct.
+ uint64_t AnonStructId = Context.getAnonymousStructId(TD);
+
+ // Mangle it as a source name in the form
+ // [n] $_<id>
+ // where n is the length of the string.
+ llvm::SmallString<8> Str;
+ Str += "$_";
+ Str += llvm::utostr(AnonStructId);
+
+ Out << Str.size();
+ Out << Str.str();
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ assert(false && "Can't mangle Objective-C selector names here!");
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ constructor we're mangling, use the type
+ // we were given.
+ mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
+ else
+ // Otherwise, use the complete constructor name. This is relevant if a
+ // class with a constructor is declared within a constructor.
+ mangleCXXCtorType(Ctor_Complete);
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ destructor we're mangling, use the type we
+ // were given.
+ mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+ else
+ // Otherwise, use the complete destructor name. This is relevant if a
+ // class with a destructor is declared within a destructor.
+ mangleCXXDtorType(Dtor_Complete);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= cv <type> # (cast)
+ Out << "cv";
+ mangleType(Context.getASTContext().getCanonicalType(Name.getCXXNameType()));
+ break;
+
+ case DeclarationName::CXXOperatorName: {
+ unsigned Arity;
+ if (ND) {
+ Arity = cast<FunctionDecl>(ND)->getNumParams();
+
+      // If we have a C++ member function, we need to include the 'this'
+      // pointer in the arity.
+      // FIXME: This does not make sense for static operators, but their
+      // names stay the same regardless of arity (operator new, for instance).
+ if (isa<CXXMethodDecl>(ND))
+ Arity++;
+ } else
+ Arity = KnownArity;
+
+ mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
+ break;
+ }
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: This mangling is not yet official.
+ Out << "li";
+ mangleSourceName(Name.getCXXLiteralIdentifier());
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ assert(false && "Can't mangle a using directive name!");
+ break;
+ }
+}
+
+void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
+ Out << II->getLength() << II->getName();
+}
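+
+// For illustration: the identifier "vector" encodes as "6vector".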
+
+void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
+ const DeclContext *DC,
+ bool NoFunction) {
+ // <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
+ // ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+
+ Out << 'N';
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND))
+ mangleQualifiers(Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+  } else {
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND);
+ }
+
+ Out << 'E';
+}
+void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+
+ Out << 'N';
+
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+
+ Out << 'E';
+}
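+
+// Illustrative example (hypothetical declaration): "void foo::bar()", with
+// foo a class or namespace, encodes as _ZN3foo3barEv -- 'N'...'E' bracket
+// the prefix "3foo" and the unqualified name "3bar".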
+
+void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
+ // <local-name> := Z <function encoding> E <entity name> [<discriminator>]
+ // := Z <function encoding> E s [<discriminator>]
+ // <discriminator> := _ <non-negative number>
+ const DeclContext *DC = ND->getDeclContext();
+ Out << 'Z';
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(MD);
+ }
+ else if (const DeclContext *CDC = GetLocalClassFunctionDeclContext(DC)) {
+ mangleFunctionEncoding(cast<FunctionDecl>(CDC));
+ Out << 'E';
+ mangleNestedName(ND, DC, true /*NoFunction*/);
+
+    // FIXME: This still does not cover all cases.
+ unsigned disc;
+ if (Context.getNextDiscriminator(ND, disc)) {
+ if (disc < 10)
+ Out << '_' << disc;
+ else
+ Out << "__" << disc << '_';
+ }
+
+ return;
+  } else
+ mangleFunctionEncoding(cast<FunctionDecl>(DC));
+
+ Out << 'E';
+ mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
+ // <prefix> ::= <prefix> <unqualified-name>
+ // ::= <template-prefix> <template-args>
+ // ::= <template-param>
+ // ::= # empty
+ // ::= <substitution>
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
+ manglePrefix(DC->getParent(), NoFunction);
+ llvm::SmallString<64> Name;
+ Context.mangleBlock(Block, Name);
+ Out << Name.size() << Name;
+ return;
+ }
+
+ if (mangleSubstitution(cast<NamedDecl>(DC)))
+ return;
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(cast<NamedDecl>(DC), TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+  } else if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ manglePrefix(DC->getParent(), NoFunction);
+ mangleUnqualifiedName(cast<NamedDecl>(DC));
+ }
+
+ addSubstitution(cast<NamedDecl>(DC));
+}
+
+void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplatePrefix(TD);
+
+ if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
+ mangleUnresolvedScope(Qualified->getQualifier());
+
+ if (OverloadedTemplateStorage *Overloaded
+ = Template.getAsOverloadedTemplate()) {
+ mangleUnqualifiedName(0, (*Overloaded->begin())->getDeclName(),
+ UnknownArity);
+ return;
+ }
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Unknown template name kind?");
+ mangleUnresolvedScope(Dependent->getQualifier());
+ mangleUnscopedTemplateName(Template);
+}
+
+void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ // <template-template-param> ::= <template-param>
+  //                             ::= <substitution>
+
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ return;
+ }
+
+ manglePrefix(ND->getDeclContext());
+ mangleUnqualifiedName(ND->getTemplatedDecl());
+ addSubstitution(ND);
+}
+
+void
+CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
+ switch (OO) {
+ // <operator-name> ::= nw # new
+ case OO_New: Out << "nw"; break;
+ // ::= na # new[]
+ case OO_Array_New: Out << "na"; break;
+ // ::= dl # delete
+ case OO_Delete: Out << "dl"; break;
+ // ::= da # delete[]
+ case OO_Array_Delete: Out << "da"; break;
+ // ::= ps # + (unary)
+ // ::= pl # +
+ case OO_Plus:
+ assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+ Out << (Arity == 1? "ps" : "pl"); break;
+ // ::= ng # - (unary)
+ // ::= mi # -
+ case OO_Minus:
+ assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+ Out << (Arity == 1? "ng" : "mi"); break;
+ // ::= ad # & (unary)
+ // ::= an # &
+ case OO_Amp:
+ assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+ Out << (Arity == 1? "ad" : "an"); break;
+ // ::= de # * (unary)
+ // ::= ml # *
+ case OO_Star:
+ assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+ Out << (Arity == 1? "de" : "ml"); break;
+ // ::= co # ~
+ case OO_Tilde: Out << "co"; break;
+ // ::= dv # /
+ case OO_Slash: Out << "dv"; break;
+ // ::= rm # %
+ case OO_Percent: Out << "rm"; break;
+ // ::= or # |
+ case OO_Pipe: Out << "or"; break;
+ // ::= eo # ^
+ case OO_Caret: Out << "eo"; break;
+ // ::= aS # =
+ case OO_Equal: Out << "aS"; break;
+ // ::= pL # +=
+ case OO_PlusEqual: Out << "pL"; break;
+ // ::= mI # -=
+ case OO_MinusEqual: Out << "mI"; break;
+ // ::= mL # *=
+ case OO_StarEqual: Out << "mL"; break;
+ // ::= dV # /=
+ case OO_SlashEqual: Out << "dV"; break;
+ // ::= rM # %=
+ case OO_PercentEqual: Out << "rM"; break;
+ // ::= aN # &=
+ case OO_AmpEqual: Out << "aN"; break;
+ // ::= oR # |=
+ case OO_PipeEqual: Out << "oR"; break;
+ // ::= eO # ^=
+ case OO_CaretEqual: Out << "eO"; break;
+ // ::= ls # <<
+ case OO_LessLess: Out << "ls"; break;
+ // ::= rs # >>
+ case OO_GreaterGreater: Out << "rs"; break;
+ // ::= lS # <<=
+ case OO_LessLessEqual: Out << "lS"; break;
+ // ::= rS # >>=
+ case OO_GreaterGreaterEqual: Out << "rS"; break;
+ // ::= eq # ==
+ case OO_EqualEqual: Out << "eq"; break;
+ // ::= ne # !=
+ case OO_ExclaimEqual: Out << "ne"; break;
+ // ::= lt # <
+ case OO_Less: Out << "lt"; break;
+ // ::= gt # >
+ case OO_Greater: Out << "gt"; break;
+ // ::= le # <=
+ case OO_LessEqual: Out << "le"; break;
+ // ::= ge # >=
+ case OO_GreaterEqual: Out << "ge"; break;
+ // ::= nt # !
+ case OO_Exclaim: Out << "nt"; break;
+ // ::= aa # &&
+ case OO_AmpAmp: Out << "aa"; break;
+ // ::= oo # ||
+ case OO_PipePipe: Out << "oo"; break;
+ // ::= pp # ++
+ case OO_PlusPlus: Out << "pp"; break;
+ // ::= mm # --
+ case OO_MinusMinus: Out << "mm"; break;
+ // ::= cm # ,
+ case OO_Comma: Out << "cm"; break;
+ // ::= pm # ->*
+ case OO_ArrowStar: Out << "pm"; break;
+ // ::= pt # ->
+ case OO_Arrow: Out << "pt"; break;
+ // ::= cl # ()
+ case OO_Call: Out << "cl"; break;
+ // ::= ix # []
+ case OO_Subscript: Out << "ix"; break;
+
+ // ::= qu # ?
+ // The conditional operator can't be overloaded, but we still handle it when
+ // mangling expressions.
+ case OO_Conditional: Out << "qu"; break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Not an overloaded operator");
+ break;
+ }
+}
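+
+// Illustrative example (hypothetical class X): the free function
+// "bool operator==(X, X)" encodes as _Zeq1XS_ ("eq" plus the parameter
+// types), and a member "X &X::operator=(const X&)" as _ZN1XaSERKS_.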
+
+void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
+ // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
+ if (Quals.hasRestrict())
+ Out << 'r';
+ if (Quals.hasVolatile())
+ Out << 'V';
+ if (Quals.hasConst())
+ Out << 'K';
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
+void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ llvm::SmallString<64> Buffer;
+ MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
+ Out << Buffer;
+}
+
+void CXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = Context.getASTContext().getCanonicalType(T);
+
+ bool IsSubstitutable = T.hasLocalQualifiers() || !isa<BuiltinType>(T);
+ if (IsSubstitutable && mangleSubstitution(T))
+ return;
+
+ if (Qualifiers Quals = T.getLocalQualifiers()) {
+ mangleQualifiers(Quals);
+ // Recurse: even if the qualified type isn't yet substitutable,
+ // the unqualified type might be.
+ mangleType(T.getLocalUnqualifiedType());
+ } else {
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+ return;
+#define TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+ // Add the substitution.
+ if (IsSubstitutable)
+ addSubstitution(T);
+}
+
+void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
+ if (!mangleStandardSubstitution(ND))
+ mangleName(ND);
+}
+
+void CXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= v # void
+ // ::= w # wchar_t
+ // ::= b # bool
+ // ::= c # char
+ // ::= a # signed char
+ // ::= h # unsigned char
+ // ::= s # short
+ // ::= t # unsigned short
+ // ::= i # int
+ // ::= j # unsigned int
+ // ::= l # long
+ // ::= m # unsigned long
+ // ::= x # long long, __int64
+ // ::= y # unsigned long long, __int64
+ // ::= n # __int128
+ // UNSUPPORTED: ::= o # unsigned __int128
+ // ::= f # float
+ // ::= d # double
+ // ::= e # long double, __float80
+ // UNSUPPORTED: ::= g # __float128
+ // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
+ // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
+ // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
+ // UNSUPPORTED: ::= Dh # IEEE 754r half-precision floating point (16 bits)
+ // ::= Di # char32_t
+ // ::= Ds # char16_t
+ // ::= u <source-name> # vendor extended type
+ // From our point of view, std::nullptr_t is a builtin, but as far as mangling
+ // is concerned, it's a type called std::nullptr_t.
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'v'; break;
+ case BuiltinType::Bool: Out << 'b'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
+ case BuiltinType::UChar: Out << 'h'; break;
+ case BuiltinType::UShort: Out << 't'; break;
+ case BuiltinType::UInt: Out << 'j'; break;
+ case BuiltinType::ULong: Out << 'm'; break;
+ case BuiltinType::ULongLong: Out << 'y'; break;
+ case BuiltinType::UInt128: Out << 'o'; break;
+ case BuiltinType::SChar: Out << 'a'; break;
+ case BuiltinType::WChar: Out << 'w'; break;
+ case BuiltinType::Char16: Out << "Ds"; break;
+ case BuiltinType::Char32: Out << "Di"; break;
+ case BuiltinType::Short: Out << 's'; break;
+ case BuiltinType::Int: Out << 'i'; break;
+ case BuiltinType::Long: Out << 'l'; break;
+ case BuiltinType::LongLong: Out << 'x'; break;
+ case BuiltinType::Int128: Out << 'n'; break;
+ case BuiltinType::Float: Out << 'f'; break;
+ case BuiltinType::Double: Out << 'd'; break;
+ case BuiltinType::LongDouble: Out << 'e'; break;
+ case BuiltinType::NullPtr: Out << "St9nullptr_t"; break;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ assert(false &&
+ "Overloaded and dependent types shouldn't get to name mangling");
+ break;
+ case BuiltinType::UndeducedAuto:
+ assert(0 && "Should not see undeduced auto here");
+ break;
+ case BuiltinType::ObjCId: Out << "11objc_object"; break;
+ case BuiltinType::ObjCClass: Out << "10objc_class"; break;
+ case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
+ }
+}
+
+// <type> ::= <function-type>
+// <function-type> ::= F [Y] <bare-function-type> E
+void CXXNameMangler::mangleType(const FunctionProtoType *T) {
+ Out << 'F';
+ // FIXME: We don't have enough information in the AST to produce the 'Y'
+ // encoding for extern "C" function types.
+ mangleBareFunctionType(T, /*MangleReturnType=*/true);
+ Out << 'E';
+}
+void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType) {
+ // We should never be mangling something without a prototype.
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // <bare-function-type> ::= <signature type>+
+ if (MangleReturnType)
+ mangleType(Proto->getResultType());
+
+ if (Proto->getNumArgs() == 0) {
+ Out << 'v';
+ return;
+ }
+
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+
+ // <builtin-type> ::= z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'z';
+}
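+
+// For illustration (hypothetical declarations): "void f(int, char *)" has
+// the bare function type "iPc" (full name _Z1fiPc), while a parameterless
+// "void g()" encodes as "v" (full name _Z1gv).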
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const TagType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <array-type>
+// <array-type> ::= A <positive dimension number> _ <element type>
+// ::= A [<dimension expression>] _ <element type>
+void CXXNameMangler::mangleType(const ConstantArrayType *T) {
+ Out << 'A' << T->getSize() << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const VariableArrayType *T) {
+ Out << 'A';
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ Out << 'A';
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ Out << 'A' << '_';
+ mangleType(T->getElementType());
+}
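+
+// For illustration: "int[10]" encodes as "A10_i", and an array of unknown
+// bound "int[]" as "A_i".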
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= M <class type> <member type>
+void CXXNameMangler::mangleType(const MemberPointerType *T) {
+ Out << 'M';
+ mangleType(QualType(T->getClass(), 0));
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
+ mangleType(FPT);
+ } else
+ mangleType(PointeeType);
+}
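+
+// For illustration (hypothetical class X): "int X::*" encodes as "M1Xi",
+// and "void (X::*)() const" as "M1XKFvvE" -- the 'K' for the const member
+// function is emitted before its function type, as above.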
+
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ mangleTemplateParameter(T->getIndex());
+}
+
+// FIXME: <type> ::= <template-template-param> <template-args>
+
+// <type> ::= P <type> # pointer-to
+void CXXNameMangler::mangleType(const PointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= R <type> # reference-to
+void CXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'R';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= O <type> # rvalue reference-to (C++0x)
+void CXXNameMangler::mangleType(const RValueReferenceType *T) {
+ Out << 'O';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= C <type> # complex pair (C 2000)
+void CXXNameMangler::mangleType(const ComplexType *T) {
+ Out << 'C';
+ mangleType(T->getElementType());
+}
+
+// GNU extension: vector types
+// <type> ::= <vector-type>
+// <vector-type> ::= Dv <positive dimension number> _ <element type>
+// ::= Dv [<dimension expression>] _ <element type>
+void CXXNameMangler::mangleType(const VectorType *T) {
+ Out << "Dv" << T->getNumElements() << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const ExtVectorType *T) {
+ mangleType(static_cast<const VectorType*>(T));
+}
+void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ Out << "Dv";
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+
+void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ mangleSourceName(T->getDecl()->getIdentifier());
+}
+
+void CXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void CXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "U13block_pointer";
+ mangleType(T->getPointeeType());
+}
+
+void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ // Mangle injected class name types as if the user had written the
+ // specialization out fully. It may not actually be possible to see
+ // this mangling, though.
+ mangleType(T->getInjectedSpecializationType());
+}
+
+void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
+ mangleName(TD, T->getArgs(), T->getNumArgs());
+ } else {
+ if (mangleSubstitution(QualType(T, 0)))
+ return;
+
+ mangleTemplatePrefix(T->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
+ addSubstitution(QualType(T, 0));
+ }
+}
+
+void CXXNameMangler::mangleType(const DependentNameType *T) {
+ // Typename types are always nested
+ Out << 'N';
+ if (T->getIdentifier()) {
+ mangleUnresolvedScope(T->getQualifier());
+ mangleSourceName(T->getIdentifier());
+ } else {
+ const TemplateSpecializationType *TST = T->getTemplateId();
+ if (!mangleSubstitution(QualType(TST, 0))) {
+ mangleTemplatePrefix(TST->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
+ TST->getNumArgs());
+ addSubstitution(QualType(TST, 0));
+ }
+ }
+
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const TypeOfType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const TypeOfExprType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const DecltypeType *T) {
+ Expr *E = T->getUnderlyingExpr();
+
+ // type ::= Dt <expression> E # decltype of an id-expression
+ // # or class member access
+ // ::= DT <expression> E # decltype of an expression
+
+ // This purports to be an exhaustive list of id-expressions and
+ // class member accesses. Note that we do not ignore parentheses;
+ // parentheses change the semantics of decltype for these
+ // expressions (and cause the mangler to use the other form).
+ if (isa<DeclRefExpr>(E) ||
+ isa<MemberExpr>(E) ||
+ isa<UnresolvedLookupExpr>(E) ||
+ isa<DependentScopeDeclRefExpr>(E) ||
+ isa<CXXDependentScopeMemberExpr>(E) ||
+ isa<UnresolvedMemberExpr>(E))
+ Out << "Dt";
+ else
+ Out << "DT";
+ mangleExpression(E);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleIntegerLiteral(QualType T,
+ const llvm::APSInt &Value) {
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+ Out << 'L';
+
+ mangleType(T);
+ if (T->isBooleanType()) {
+ // Boolean values are encoded as 0/1.
+ Out << (Value.getBoolValue() ? '1' : '0');
+ } else {
+ if (Value.isNegative())
+ Out << 'n';
+ Value.abs().print(Out, false);
+ }
+ Out << 'E';
+}
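+
+// For illustration: the int literal 42 encodes as "Li42E", -42 as "Lin42E",
+// and the bool literal true as "Lb1E".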
+
+void CXXNameMangler::mangleCalledExpression(const Expr *E, unsigned Arity) {
+  // A non-overloaded callee is mangled as an ordinary expression; the arity
+  // only matters for disambiguating the overload sets handled below.
+  if (E->getType() != getASTContext().OverloadTy)
+    return mangleExpression(E);
+
+  // FIXME: propagate arity to dependent overloads?
+
+ llvm::PointerIntPair<OverloadExpr*,1> R
+ = OverloadExpr::find(const_cast<Expr*>(E));
+ if (R.getInt())
+ Out << "an"; // &
+ const OverloadExpr *Ovl = R.getPointer();
+ if (const UnresolvedMemberExpr *ME = dyn_cast<UnresolvedMemberExpr>(Ovl)) {
+ mangleMemberExpr(ME->getBase(), ME->isArrow(), ME->getQualifier(),
+ ME->getMemberName(), Arity);
+ return;
+ }
+
+ mangleUnresolvedName(Ovl->getQualifier(), Ovl->getName(), Arity);
+}
+
+/// Mangles a member expression. Implicit accesses are not handled,
+/// but that should be okay, because you shouldn't be able to
+/// make an implicit access in a function template declaration.
+void CXXNameMangler::mangleMemberExpr(const Expr *Base,
+ bool IsArrow,
+ NestedNameSpecifier *Qualifier,
+ DeclarationName Member,
+ unsigned Arity) {
+ // gcc-4.4 uses 'dt' for dot expressions, which is reasonable.
+  // On the other hand, gcc also mangles the name as an expression.
+ Out << (IsArrow ? "pt" : "dt");
+ mangleExpression(Base);
+ mangleUnresolvedName(Qualifier, Member, Arity);
+}
+
+void CXXNameMangler::mangleExpression(const Expr *E) {
+ // <expression> ::= <unary operator-name> <expression>
+ // ::= <binary operator-name> <expression> <expression>
+ // ::= <trinary operator-name> <expression> <expression> <expression>
+ // ::= cl <expression>* E # call
+  //              ::= cv <type> <expression> # conversion with one argument
+ // ::= cv <type> _ <expression>* E # conversion with a different number of arguments
+ // ::= st <type> # sizeof (a type)
+ // ::= at <type> # alignof (a type)
+ // ::= <template-param>
+ // ::= <function-param>
+ // ::= sr <type> <unqualified-name> # dependent name
+ // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
+ // ::= sZ <template-param> # size of a parameter pack
+ // ::= <expr-primary>
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+  //                ::= L <type> <value float> E # floating literal
+ // ::= L <mangled-name> E # external name
+ switch (E->getStmtClass()) {
+ case Expr::NoStmtClass:
+#define EXPR(Type, Base)
+#define STMT(Type, Base) \
+ case Expr::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ llvm_unreachable("unexpected statement kind");
+ break;
+
+ default: {
+ // As bad as this diagnostic is, it's better than crashing.
+ Diagnostic &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot yet mangle expression type %0");
+ Diags.Report(FullSourceLoc(E->getExprLoc(),
+ getASTContext().getSourceManager()),
+ DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ case Expr::CallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+ Out << "cl";
+ mangleCalledExpression(CE->getCallee(), CE->getNumArgs());
+ for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I)
+ mangleExpression(CE->getArg(I));
+ Out << 'E';
+ break;
+ }
+
+ case Expr::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), ME->getMemberDecl()->getDeclName(),
+ UnknownArity);
+ break;
+ }
+
+ case Expr::UnresolvedMemberExprClass: {
+ const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), ME->getMemberName(),
+ UnknownArity);
+ break;
+ }
+
+ case Expr::CXXDependentScopeMemberExprClass: {
+ const CXXDependentScopeMemberExpr *ME
+ = cast<CXXDependentScopeMemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), ME->getMember(),
+ UnknownArity);
+ break;
+ }
+
+ case Expr::UnresolvedLookupExprClass: {
+ // The ABI doesn't cover how to mangle overload sets, so we mangle
+ // using something as close as possible to the original lookup
+ // expression.
+ const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
+ mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), UnknownArity);
+ break;
+ }
+
+ case Expr::CXXUnresolvedConstructExprClass: {
+ const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
+ unsigned N = CE->arg_size();
+
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(E);
+ unsigned N = CE->getNumArgs();
+
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::SizeOfAlignOfExprClass: {
+ const SizeOfAlignOfExpr *SAE = cast<SizeOfAlignOfExpr>(E);
+ if (SAE->isSizeOf()) Out << 's';
+ else Out << 'a';
+ if (SAE->isArgumentType()) {
+ Out << 't';
+ mangleType(SAE->getArgumentType());
+ } else {
+ Out << 'z';
+ mangleExpression(SAE->getArgumentExpr());
+ }
+ break;
+ }
+
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(E);
+ mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
+ /*Arity=*/1);
+ mangleExpression(UO->getSubExpr());
+ break;
+ }
+
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(E);
+ mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
+ /*Arity=*/2);
+ mangleExpression(BO->getLHS());
+ mangleExpression(BO->getRHS());
+ break;
+ }
+
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ mangleOperatorName(OO_Conditional, /*Arity=*/3);
+ mangleExpression(CO->getCond());
+ mangleExpression(CO->getLHS());
+ mangleExpression(CO->getRHS());
+ break;
+ }
+
+ case Expr::ImplicitCastExprClass: {
+ mangleExpression(cast<ImplicitCastExpr>(E)->getSubExpr());
+ break;
+ }
+
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::CXXFunctionalCastExprClass: {
+ const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
+ Out << "cv";
+ mangleType(ECE->getType());
+ mangleExpression(ECE->getSubExpr());
+ break;
+ }
+
+ case Expr::CXXOperatorCallExprClass: {
+ const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
+ unsigned NumArgs = CE->getNumArgs();
+ mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
+ // Mangle the arguments.
+ for (unsigned i = 0; i != NumArgs; ++i)
+ mangleExpression(CE->getArg(i));
+ break;
+ }
+
+ case Expr::ParenExprClass:
+ mangleExpression(cast<ParenExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::DeclRefExprClass: {
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ switch (D->getKind()) {
+ default:
+ // <expr-primary> ::= L <mangled-name> E # external name
+ Out << 'L';
+ mangle(D, "_Z");
+ Out << 'E';
+ break;
+
+ case Decl::NonTypeTemplateParm: {
+ const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
+ mangleTemplateParameter(PD->getIndex());
+ break;
+ }
+    }
+
+ break;
+ }
+
+ case Expr::DependentScopeDeclRefExprClass: {
+ const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
+ NestedNameSpecifier *NNS = DRE->getQualifier();
+ const Type *QTy = NNS->getAsType();
+
+ // When we're dealing with a nested-name-specifier that has just a
+ // dependent identifier in it, mangle that as a typename. FIXME:
+ // It isn't clear that we ever actually want to have such a
+ // nested-name-specifier; why not just represent it as a typename type?
+ if (!QTy && NNS->getAsIdentifier() && NNS->getPrefix()) {
+ QTy = getASTContext().getDependentNameType(ETK_Typename,
+ NNS->getPrefix(),
+ NNS->getAsIdentifier())
+ .getTypePtr();
+ }
+ assert(QTy && "Qualifier was not type!");
+
+ // ::= sr <type> <unqualified-name> # dependent name
+ Out << "sr";
+ mangleType(QualType(QTy, 0));
+
+ assert(DRE->getDeclName().getNameKind() == DeclarationName::Identifier &&
+ "Unhandled decl name kind!");
+ mangleSourceName(DRE->getDeclName().getAsIdentifierInfo());
+
+ break;
+ }
+
+ case Expr::CXXBindReferenceExprClass:
+ mangleExpression(cast<CXXBindReferenceExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::CXXBindTemporaryExprClass:
+ mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::CXXExprWithTemporariesClass:
+ mangleExpression(cast<CXXExprWithTemporaries>(E)->getSubExpr());
+ break;
+
+ case Expr::FloatingLiteralClass: {
+ const FloatingLiteral *FL = cast<FloatingLiteral>(E);
+ Out << 'L';
+ mangleType(FL->getType());
+
+ // TODO: avoid this copy with careful stream management.
+ llvm::SmallString<20> Buffer;
+ FL->getValue().bitcastToAPInt().toString(Buffer, 16, false);
+ Out.write(Buffer.data(), Buffer.size());
+
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CharacterLiteralClass:
+ Out << 'L';
+ mangleType(E->getType());
+ Out << cast<CharacterLiteral>(E)->getValue();
+ Out << 'E';
+ break;
+
+ case Expr::CXXBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
+ case Expr::IntegerLiteralClass:
+ mangleIntegerLiteral(E->getType(),
+ llvm::APSInt(cast<IntegerLiteral>(E)->getValue()));
+ break;
+
+ }
+}
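+
+// Illustrative example (hypothetical declarations): in
+// "template<class T> void f(A<sizeof(T)>)", the dependent argument sizeof(T)
+// encodes as "XstT_E", so f<int> becomes _Z1fIiEv1AIXstT_EE.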
+
+// FIXME: <type> ::= G <type> # imaginary (C 2000)
+// FIXME: <type> ::= U <source-name> <type> # vendor extended type qualifier
+
+void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
+ // <ctor-dtor-name> ::= C1 # complete object constructor
+ // ::= C2 # base object constructor
+ // ::= C3 # complete object allocating constructor
+ //
+ switch (T) {
+ case Ctor_Complete:
+ Out << "C1";
+ break;
+ case Ctor_Base:
+ Out << "C2";
+ break;
+ case Ctor_CompleteAllocating:
+ Out << "C3";
+ break;
+ }
+}
+
+void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+ // <ctor-dtor-name> ::= D0 # deleting destructor
+ // ::= D1 # complete object destructor
+ // ::= D2 # base object destructor
+ //
+ switch (T) {
+ case Dtor_Deleting:
+ Out << "D0";
+ break;
+ case Dtor_Complete:
+ Out << "D1";
+ break;
+ case Dtor_Base:
+ Out << "D2";
+ break;
+ }
+}
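+
+// For illustration (hypothetical class Foo): the complete-object default
+// constructor is _ZN3FooC1Ev and the complete-object destructor _ZN3FooD1Ev;
+// the base-object variants use C2/D2 instead.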
+
+void CXXNameMangler::mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplateArgs(*TD->getTemplateParameters(), TemplateArgs,
+ NumTemplateArgs);
+
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(0, TemplateArgs[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0, e = AL.size(); i != e; ++i)
+ mangleTemplateArg(PL.getParam(i), AL[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(PL.getParam(i), TemplateArgs[i]);
+ Out << 'E';
+}
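+
+// For illustration: the argument list <int, true> encodes as "IiLb1EE".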
+
+void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
+ const TemplateArgument &A) {
+ // <template-arg> ::= <type> # type or template
+ // ::= X <expression> E # expression
+ // ::= <expr-primary> # simple expressions
+ // ::= I <template-arg>* E # argument pack
+ // ::= sp <expression> # pack expansion of (C++0x)
+ switch (A.getKind()) {
+ default:
+ assert(0 && "Unknown template argument kind!");
+ case TemplateArgument::Type:
+ mangleType(A.getAsType());
+ break;
+ case TemplateArgument::Template:
+ assert(A.getAsTemplate().getAsTemplateDecl() &&
+ "Can't get dependent template names here");
+ mangleName(A.getAsTemplate().getAsTemplateDecl());
+ break;
+ case TemplateArgument::Expression:
+ Out << 'X';
+ mangleExpression(A.getAsExpr());
+ Out << 'E';
+ break;
+ case TemplateArgument::Integral:
+ mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
+ break;
+ case TemplateArgument::Declaration: {
+ assert(P && "Missing template parameter for declaration argument");
+ // <expr-primary> ::= L <mangled-name> E # external name
+
+ // Clang produces AST's where pointer-to-member-function expressions
+ // and pointer-to-function expressions are represented as a declaration not
+ // an expression. We compensate for it here to produce the correct mangling.
+ NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
+ const NonTypeTemplateParmDecl *Parameter = cast<NonTypeTemplateParmDecl>(P);
+ bool compensateMangling = D->isCXXClassMember() &&
+ !Parameter->getType()->isReferenceType();
+ if (compensateMangling) {
+ Out << 'X';
+ mangleOperatorName(OO_Amp, 1);
+ }
+
+ Out << 'L';
+ // References to external entities use the mangled name; if the name would
+    // not normally be mangled then mangle it as unqualified.
+ //
+ // FIXME: The ABI specifies that external names here should have _Z, but
+ // gcc leaves this off.
+ if (compensateMangling)
+ mangle(D, "_Z");
+ else
+ mangle(D, "Z");
+ Out << 'E';
+
+ if (compensateMangling)
+ Out << 'E';
+
+ break;
+ }
+ }
+}
+
+void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
+ // <template-param> ::= T_ # first template parameter
+ // ::= T <parameter-2 non-negative number> _
+ if (Index == 0)
+ Out << "T_";
+ else
+ Out << 'T' << (Index - 1) << '_';
+}
+
+// <substitution> ::= S <seq-id> _
+// ::= S_
+bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
+ // Try one of the standard substitutions first.
+ if (mangleStandardSubstitution(ND))
+ return true;
+
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+ return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
+}
+
+bool CXXNameMangler::mangleSubstitution(QualType T) {
+ if (!T.getCVRQualifiers()) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return mangleSubstitution(RT->getDecl());
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+
+ return mangleSubstitution(TypePtr);
+}
+
+bool CXXNameMangler::mangleSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ return mangleSubstitution(
+ reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
+ llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr);
+ if (I == Substitutions.end())
+ return false;
+
+ unsigned SeqID = I->second;
+ if (SeqID == 0)
+ Out << "S_";
+ else {
+ SeqID--;
+
+ // <seq-id> is encoded in base-36, using digits and upper case letters.
+ char Buffer[10];
+ char *BufferPtr = llvm::array_endof(Buffer);
+
+ if (SeqID == 0) *--BufferPtr = '0';
+
+ while (SeqID) {
+ assert(BufferPtr > Buffer && "Buffer overflow!");
+
+      unsigned char c = static_cast<unsigned char>(SeqID % 36);
+
+ *--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10);
+ SeqID /= 36;
+ }
+
+ Out << 'S'
+ << llvm::StringRef(BufferPtr, llvm::array_endof(Buffer)-BufferPtr)
+ << '_';
+ }
+
+ return true;
+}
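+
+// For illustration: successive substitution candidates encode as S_, S0_,
+// S1_, ..., S9_, SA_, ..., SZ_, S10_, ...; after the first, the <seq-id> is
+// the candidate's index minus one, written in base 36.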
+
+static bool isCharType(QualType T) {
+ if (T.isNull())
+ return false;
+
+ return T->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ T->isSpecificBuiltinType(BuiltinType::Char_U);
+}
+
+/// isCharSpecialization - Returns whether a given type is a template
+/// specialization of a given name with a single argument of type char.
+static bool isCharSpecialization(QualType T, const char *Name) {
+ if (T.isNull())
+ return false;
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (!SD)
+ return false;
+
+ if (!isStdNamespace(SD->getDeclContext()))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 1)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ return SD->getIdentifier()->getName() == Name;
+}
+
+template <std::size_t StrLen>
+static bool
+isStreamCharSpecialization(const ClassTemplateSpecializationDecl *SD,
+                           const char (&Str)[StrLen]) {
+ if (!SD->getIdentifier()->isStr(Str))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 2)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ return true;
+}
+
+bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
+ // <substitution> ::= St # ::std::
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (isStd(NS)) {
+ Out << "St";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
+ if (!isStdNamespace(TD->getDeclContext()))
+ return false;
+
+ // <substitution> ::= Sa # ::std::allocator
+ if (TD->getIdentifier()->isStr("allocator")) {
+ Out << "Sa";
+ return true;
+ }
+
+    // <substitution> ::= Sb # ::std::basic_string
+ if (TD->getIdentifier()->isStr("basic_string")) {
+ Out << "Sb";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ if (!isStdNamespace(SD->getDeclContext()))
+ return false;
+
+ // <substitution> ::= Ss # ::std::basic_string<char,
+ // ::std::char_traits<char>,
+ // ::std::allocator<char> >
+ if (SD->getIdentifier()->isStr("basic_string")) {
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+
+ if (TemplateArgs.size() != 3)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
+ return false;
+
+ Out << "Ss";
+ return true;
+ }
+
+ // <substitution> ::= Si # ::std::basic_istream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_istream")) {
+ Out << "Si";
+ return true;
+ }
+
+ // <substitution> ::= So # ::std::basic_ostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_ostream")) {
+ Out << "So";
+ return true;
+ }
+
+ // <substitution> ::= Sd # ::std::basic_iostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_iostream")) {
+ Out << "Sd";
+ return true;
+ }
+ }
+ return false;
+}
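+
+// For illustration: with these abbreviations, a hypothetical
+// "void f(std::string)" mangles to _Z1fSs rather than spelling out the
+// full basic_string specialization.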
+
+void CXXNameMangler::addSubstitution(QualType T) {
+ if (!T.getCVRQualifiers()) {
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ addSubstitution(RT->getDecl());
+ return;
+ }
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ addSubstitution(TypePtr);
+}
+
+void CXXNameMangler::addSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return addSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ addSubstitution(reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
+ unsigned SeqID = Substitutions.size();
+
+ assert(!Substitutions.count(Ptr) && "Substitution already exists!");
+ Substitutions[Ptr] = SeqID;
+}
+
+/// \brief Mangles the name of the declaration D and emits that name to the
+/// given output buffer.
+///
+/// Callers are expected to have checked shouldMangleDeclName() first; a
+/// declaration that does not require mangling should instead be emitted
+/// under the identifier of the declaration (\c D->getIdentifier()).
+void MangleContext::mangleName(const NamedDecl *D,
+ llvm::SmallVectorImpl<char> &Res) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ CXXNameMangler Mangler(*this, Res);
+ return Mangler.mangle(D);
+}
+
+void MangleContext::mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &Res) {
+ CXXNameMangler Mangler(*this, Res, D, Type);
+ Mangler.mangle(D);
+}
+
+void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &Res) {
+ CXXNameMangler Mangler(*this, Res, D, Type);
+ Mangler.mangle(D);
+}
+
+void MangleContext::mangleBlock(const BlockDecl *BD,
+ llvm::SmallVectorImpl<char> &Res) {
+ MiscNameMangler Mangler(*this, Res);
+ Mangler.mangleBlock(BD);
+}
+
+void MangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // # first call-offset is 'this' adjustment
+ // # second call-offset is result adjustment
+
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Use mangleCXXDtor for destructor decls!");
+
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZT";
+ if (!Thunk.Return.isEmpty())
+ Mangler.getStream() << 'c';
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(Thunk.This.NonVirtual, Thunk.This.VCallOffsetOffset);
+
+ // Mangle the return pointer adjustment if there is one.
+ if (!Thunk.Return.isEmpty())
+ Mangler.mangleCallOffset(Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(MD);
+}
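+
+// Illustrative example (hypothetical hierarchy): a thunk to Derived::f()
+// whose 'this' pointer needs a non-virtual adjustment of -8 is named
+// _ZThn8_N7Derived1fEv.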
+
+void
+MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+
+ CXXNameMangler Mangler(*this, Res, DD, Type);
+ Mangler.getStream() << "_ZT";
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
+ ThisAdjustment.VCallOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(DD);
+}
+
+/// mangleGuardVariable - Returns the mangled name for a guard variable
+/// for the passed in VarDecl.
+void MangleContext::mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= GV <object name> # Guard variable for one-time
+ // # initialization
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZGV";
+ Mangler.mangleName(D);
+}
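+
+// For illustration (hypothetical declarations): the guard for a
+// function-local "static X x;" inside "void f()" is named _ZGVZ1fvE1x.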
+
+void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TV <type> # virtual table
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTV";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TT <type> # VTT structure
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTT";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void MangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TC <type> <offset number> _ <base type>
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTC";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+ Mangler.getStream() << Offset;
+ Mangler.getStream() << '_';
+ Mangler.mangleNameOrStandardSubstitution(Type);
+}
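+
+// For illustration (hypothetical classes): the vtable for Foo is _ZTV3Foo,
+// its VTT is _ZTT3Foo, and a construction vtable for Foo-in-Bar at offset 8
+// would be _ZTC3Bar8_3Foo.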
+
+void MangleContext::mangleCXXRTTI(QualType Ty,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TI <type> # typeinfo structure
+ assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTI";
+ Mangler.mangleType(Ty);
+}
+
+void MangleContext::mangleCXXRTTIName(QualType Ty,
+ llvm::SmallVectorImpl<char> &Res) {
+ // <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZTS";
+ Mangler.mangleType(Ty);
+}
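+
+// For illustration (hypothetical class Foo): the type_info object for Foo
+// is _ZTI3Foo and its NUL-terminated name string is _ZTS3Foo.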
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
new file mode 100644
index 0000000..f1c5358
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
@@ -0,0 +1,171 @@
+//===--- Mangle.h - Mangle C++ Names ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_MANGLE_H
+#define LLVM_CLANG_CODEGEN_MANGLE_H
+
+#include "CGCXX.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+ class ASTContext;
+ class BlockDecl;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class FunctionDecl;
+ class NamedDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ struct ThisAdjustment;
+ struct ThunkInfo;
+
+/// MangleBuffer - a convenient class for storing a name which is
+/// either the result of a mangling or is a constant string with
+/// external memory ownership.
+class MangleBuffer {
+public:
+ void setString(llvm::StringRef Ref) {
+ String = Ref;
+ }
+
+ llvm::SmallVectorImpl<char> &getBuffer() {
+ return Buffer;
+ }
+
+ llvm::StringRef getString() const {
+ if (!String.empty()) return String;
+ return Buffer.str();
+ }
+
+ operator llvm::StringRef() const {
+ return getString();
+ }
+
+private:
+ llvm::StringRef String;
+ llvm::SmallString<256> Buffer;
+};
+
+/// MangleContext - Context for tracking state which persists across multiple
+/// calls to the C++ name mangler.
+class MangleContext {
+ ASTContext &Context;
+ Diagnostic &Diags;
+
+ llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
+ unsigned Discriminator;
+ llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+ llvm::DenseMap<const BlockDecl*, unsigned> GlobalBlockIds;
+ llvm::DenseMap<const BlockDecl*, unsigned> LocalBlockIds;
+
+public:
+ explicit MangleContext(ASTContext &Context,
+ Diagnostic &Diags)
+ : Context(Context), Diags(Diags) { }
+
+ ASTContext &getASTContext() const { return Context; }
+
+ Diagnostic &getDiags() const { return Diags; }
+
+ void startNewFunction() { LocalBlockIds.clear(); }
+
+ uint64_t getAnonymousStructId(const TagDecl *TD) {
+ std::pair<llvm::DenseMap<const TagDecl *,
+ uint64_t>::iterator, bool> Result =
+ AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
+ return Result.first->second;
+ }
+
+ unsigned getBlockId(const BlockDecl *BD, bool Local) {
+ llvm::DenseMap<const BlockDecl *, unsigned> &BlockIds
+ = Local? LocalBlockIds : GlobalBlockIds;
+ std::pair<llvm::DenseMap<const BlockDecl *, unsigned>::iterator, bool>
+ Result = BlockIds.insert(std::make_pair(BD, BlockIds.size()));
+ return Result.first->second;
+ }
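+
+ // Note: both id maps above exploit DenseMap::insert() with the current
+ // map size as the value: the first use of a key is assigned the next
+ // sequential id, and repeat lookups leave it unchanged.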
+
+ /// @name Mangler Entry Points
+ /// @{
+
+ bool shouldMangleDeclName(const NamedDecl *D);
+ virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ void mangleBlock(const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
+
+ void mangleInitDiscriminator() {
+ Discriminator = 0;
+ }
+
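+ // The first decl seen after mangleInitDiscriminator() gets no
+ // discriminator (getNextDiscriminator returns false); later decls get
+ // 0, 1, 2, ... in order of first use.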
+ bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ unsigned &discriminator = Uniquifier[ND];
+ if (!discriminator)
+ discriminator = ++Discriminator;
+ if (discriminator == 1)
+ return false;
+ disc = discriminator-2;
+ return true;
+ }
+ /// @}
+};
+
+/// MiscNameMangler - Mangles Objective-C method names and blocks.
+class MiscNameMangler {
+ MangleContext &Context;
+ llvm::raw_svector_ostream Out;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MiscNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res);
+
+ llvm::raw_svector_ostream &getStream() { return Out; }
+
+ void mangleBlock(const BlockDecl *BD);
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+};
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 0000000..9905ca6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,107 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This consumes an AST and converts it to LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CodeGenModule.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+namespace {
+ class CodeGeneratorImpl : public CodeGenerator {
+ Diagnostic &Diags;
+ llvm::OwningPtr<const llvm::TargetData> TD;
+ ASTContext *Ctx;
+ const CodeGenOptions CodeGenOpts; // Intentionally copied in.
+ protected:
+ llvm::OwningPtr<llvm::Module> M;
+ llvm::OwningPtr<CodeGen::CodeGenModule> Builder;
+ public:
+ CodeGeneratorImpl(Diagnostic &diags, const std::string& ModuleName,
+ const CodeGenOptions &CGO, llvm::LLVMContext& C)
+ : Diags(diags), CodeGenOpts(CGO), M(new llvm::Module(ModuleName, C)) {}
+
+ virtual ~CodeGeneratorImpl() {}
+
+ virtual llvm::Module* GetModule() {
+ return M.get();
+ }
+
+ virtual llvm::Module* ReleaseModule() {
+ return M.take();
+ }
+
+ virtual void Initialize(ASTContext &Context) {
+ Ctx = &Context;
+
+ M->setTargetTriple(Ctx->Target.getTriple().getTriple());
+ M->setDataLayout(Ctx->Target.getTargetDescription());
+ TD.reset(new llvm::TargetData(Ctx->Target.getTargetDescription()));
+ Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts,
+ *M, *TD, Diags));
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef DG) {
+ // Make sure to emit all elements of a Decl.
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ Builder->EmitTopLevelDecl(*I);
+ }
+
+ /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+ /// (e.g. struct, union, enum, class) is completed. This allows the client
+ /// to hack on the type, which can occur at any point in the file
+ /// (because these can be defined in declspecs).
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ Builder->UpdateCompletedType(D);
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ if (Diags.hasErrorOccurred()) {
+ M.reset();
+ return;
+ }
+
+ if (Builder)
+ Builder->Release();
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitVTable(RD, DefinitionRequired);
+ }
+ };
+}
+
+CodeGenerator *clang::CreateLLVMCodeGen(Diagnostic &Diags,
+ const std::string& ModuleName,
+ const CodeGenOptions &CGO,
+ llvm::LLVMContext& C) {
+ return new CodeGeneratorImpl(Diags, ModuleName, CGO, C);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/README.txt b/contrib/llvm/tools/clang/lib/CodeGen/README.txt
new file mode 100644
index 0000000..e6d6109
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/README.txt
@@ -0,0 +1,47 @@
+IRgen optimization opportunities.
+
+//===---------------------------------------------------------------------===//
+
+The common pattern of
+--
+short x; // or char, etc
+(x == 10)
+--
+generates a zext/sext of x, which can easily be avoided.
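+
+For illustration, the comparison currently comes out roughly as
+  %conv = sext i16 %x to i32
+  %cmp = icmp eq i32 %conv, 10
+when it could simply be
+  %cmp = icmp eq i16 %x, 10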
+
+//===---------------------------------------------------------------------===//
+
+Bit-field accesses can be shifted to simplify masking and sign
+extension. For example, if the bit-field width is 8 and it is
+appropriately aligned, then it is a lot shorter to just load the char
+directly.
+
+//===---------------------------------------------------------------------===//
+
+It may be worth avoiding creation of alloca's for formal arguments
+for the common situation where the argument is never written to or has
+its address taken. The idea would be to begin generating code by using
+the argument directly and if its address is taken or it is stored to
+then generate the alloca and patch up the existing code.
+
+In theory, the same optimization could be a win for block local
+variables as long as the declaration dominates all statements in the
+block.
+
+NOTE: The main case we care about here is -O0 -g compile-time
+performance; in that scenario we currently need to emit the alloca
+anyway in order to emit proper debug info. So this is blocked by
+being able to emit debug information which refers to an LLVM
+temporary, not an alloca.
+
+//===---------------------------------------------------------------------===//
+
+We should try to avoid generating basic blocks which only contain
+jumps. At -O0, this penalizes us all the way from IRgen (malloc &
+instruction overhead) down through code generation and
+assembly time.
+
+On 176.gcc:expr.ll, it looks like over 12% of basic blocks are just
+direct branches!
+
+//===---------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
new file mode 100644
index 0000000..b29d3cb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -0,0 +1,2252 @@
+//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace CodeGen;
+
+static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
+ llvm::Value *Array,
+ llvm::Value *Value,
+ unsigned FirstIndex,
+ unsigned LastIndex) {
+ // Alternatively, we could emit this as a loop in the source.
+ for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
+ llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
+ Builder.CreateStore(Value, Cell);
+ }
+}
+
+ABIInfo::~ABIInfo() {}
+
+void ABIArgInfo::dump() const {
+ llvm::raw_ostream &OS = llvm::errs();
+ OS << "(ABIArgInfo Kind=";
+ switch (TheKind) {
+ case Direct:
+ OS << "Direct";
+ break;
+ case Extend:
+ OS << "Extend";
+ break;
+ case Ignore:
+ OS << "Ignore";
+ break;
+ case Coerce:
+ OS << "Coerce Type=";
+ getCoerceToType()->print(OS);
+ break;
+ case Indirect:
+ OS << "Indirect Align=" << getIndirectAlign()
+ << " Byal=" << getIndirectByVal();
+ break;
+ case Expand:
+ OS << "Expand";
+ break;
+ }
+ OS << ")\n";
+}
+
+TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it
+/// is an unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
+ bool AllowArrays) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+
+ // Constant arrays of empty records count as empty, strip them off.
+ if (AllowArrays)
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+ FT = AT->getElementType();
+
+ const RecordType *RT = FT->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // C++ record fields are never empty, at least in the Itanium ABI.
+ //
+ // FIXME: We should use a predicate for whether this behavior is true in the
+ // current ABI.
+ if (isa<CXXRecordDecl>(RT->getDecl()))
+ return false;
+
+ return isEmptyRecord(Context, FT, AllowArrays);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (!isEmptyRecord(Context, i->getType(), true))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i)
+ if (!isEmptyField(Context, *i, AllowArrays))
+ return false;
+ return true;
+}
+
+/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
+/// a non-trivial destructor or a non-trivial copy constructor.
+static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+
+ return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
+}
+
+/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
+/// a record type with either a non-trivial destructor or a non-trivial copy
+/// constructor.
+static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ return hasNonTrivialDestructorOrCopyConstructor(RT);
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The type of the single non-empty field, if
+/// it exists.
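+///
+/// For example, a hypothetical "struct A { struct { float f; } inner; }"
+/// is a single element struct whose element type is float.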
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAsStructureType();
+ if (!RT)
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return 0;
+
+ const Type *Found = 0;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ // Ignore empty records.
+ if (isEmptyRecord(Context, i->getType(), true))
+ continue;
+
+ // If we already found an element then this isn't a single-element struct.
+ if (Found)
+ return 0;
+
+ // If this is non-empty and not a single element struct, the composite
+ // cannot be a single element struct.
+ Found = isSingleElementStruct(i->getType(), Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ // Check for single element.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return 0;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ return Found;
+}
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
+ !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
+ !Ty->isBlockPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+/// canExpandIndirectArgument - Test whether an argument type which is to be
+/// passed indirectly (on the stack) would have an equivalent layout if it were
+/// expanded into separate arguments. If so, we prefer the latter to avoid
+/// inhibiting optimizations.
+///
+// FIXME: This predicate is missing many cases, currently it just follows
+// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
+// should probably make this smarter, or better yet make the LLVM backend
+// capable of handling it.
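+//
+// For example, a hypothetical "struct P { int x; int y; }" could be expanded
+// into two separate i32 arguments with the same stack layout.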
+static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
+ // We can only expand structure types.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // We can only expand (C) structures.
+ //
+ // FIXME: This needs to be generalized to handle classes as well.
+ const RecordDecl *RD = RT->getDecl();
+ if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+ // FIXME: Reject bit-fields wholesale; there are two problems: we don't
+ // know how to expand them yet, and the predicate for telling if a
+ // bit-field still counts as "basic" is more complicated than what we were
+ // doing previously.
+ if (FD->isBitField())
+ return false;
+ }
+
+ return true;
+}
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ DefaultTargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
+};
+
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
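+ // The default ABI info does not implement va_arg lowering; callers get a
+ // null value.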
+ return 0;
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (CodeGenFunction::hasAggregateLLVMType(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ ASTContext &Context;
+ bool IsDarwinVectorABI;
+ bool IsSmallStructInRegABI;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context,
+ bool ByVal = true) const;
+
+public:
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ X86_32ABIInfo(ASTContext &Context, bool d, bool p)
+ : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
+ IsSmallStructInRegABI(p) {}
+};
+
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_32TargetCodeGenInfo(ASTContext &Context, bool d, bool p)
+ :TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {}
+
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const;
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ // Darwin uses different dwarf register numbers for EH.
+ if (CGM.isTargetDarwin()) return 5;
+
+ return 4;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Type must be register sized.
+ if (!isRegisterSize(Size))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128-bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, enum, complex type, member pointer, or
+ // member function pointer it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
+ Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+ Ty->isBlockPointerType() || Ty->isMemberPointerType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // FIXME: Traverse bases here too.
+
+ // Structure types are passed in register if all fields would be
+ // passed in a register.
+ for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
+ e = RT->getDecl()->field_end(); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ return false;
+ }
+
+ return true;
+}
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getCoerce(llvm::VectorType::get(
+ llvm::Type::getInt64Ty(VMContext), 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return ABIArgInfo::getDirect();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Structures with flexible arrays are always indirect.
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Outside the small-struct-in-registers ABI, structs and unions are
+ // always indirect.
+ if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
+ return ABIArgInfo::getIndirect(0);
+
+ // Classify "single element" structs as their element type.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
+ if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
+ if (BT->isIntegerType()) {
+ // We need to use the size of the structure; padding
+ // bit-fields can make it larger than the single
+ // element type.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(
+ llvm::IntegerType::get(VMContext, (unsigned) Size));
+ } else if (BT->getKind() == BuiltinType::Float) {
+ assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ "Unexpect single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
+ } else if (BT->getKind() == BuiltinType::Double) {
+ assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ "Unexpect single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
+ }
+ } else if (SeltTy->isPointerType()) {
+ // FIXME: It would be really nice if this could come out as the proper
+ // pointer type.
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ return ABIArgInfo::getCoerce(PtrTy);
+ } else if (SeltTy->isVectorType()) {
+ // 64- and 128-bit vectors are never returned in a
+ // register when inside a structure.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size == 64 || Size == 128)
+ return ABIArgInfo::getIndirect(0);
+
+ return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
+ }
+ }
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty,
+ ASTContext &Context,
+ bool ByVal) const {
+ if (!ByVal)
+ return ABIArgInfo::getIndirect(0, false);
+
+ // Compute the byval alignment. We trust the back-end to honor the
+ // minimum ABI alignment for byval, to make cleaner IR.
+ const unsigned MinABIAlign = 4;
+ unsigned Align = Context.getTypeAlign(Ty) / 8;
+ if (Align > MinABIAlign)
+ return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(0);
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ // FIXME: Set alignment on indirect arguments.
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return getIndirectResult(Ty, Context, /*ByVal=*/false);
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectResult(Ty, Context);
+ }
+
+ // Ignore empty structs.
+ if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
+ return ABIArgInfo::getIgnore();
+
+ // Expand small (<= 128-bit) record types when we know that the stack layout
+ // of those arguments will match the struct. This is important because the
+ // LLVM backend isn't smart enough to remove byval, which inhibits many
+ // optimizations.
+ if (Context.getTypeSize(Ty) <= 4*32 &&
+ canExpandIndirectArgument(Ty, Context))
+ return ABIArgInfo::getExpand();
+
+ return getIndirectResult(Ty, Context);
+ } else {
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
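+ // On i386 every argument is passed on the stack, so va_arg reduces to a
+ // pointer bump: load the current pointer, advance it by the argument size
+ // rounded up to the 4-byte slot size, and store it back.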
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ // Get the LLVM function.
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ // Now add the 'alignstack' attribute with a value of 16.
+ Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
+ }
+ }
+}
+
+bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+
+ // 0-7 are the eight integer registers; the order is different
+ // on Darwin (for EH), but the range is the same.
+ // 8 is %eip.
+ AssignToArrayRange(Builder, Address, Four8, 0, 8);
+
+ if (CGF.CGM.isTargetDarwin()) {
+ // 12-16 are st(0..4). Not sure why we stop at 4.
+ // These have size 16, which is sizeof(long double) on
+ // platforms with 8-byte alignment for that type.
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+ AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
+
+ } else {
+ // 9 is %eflags, which doesn't get a size on Darwin for some
+ // reason.
+ Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
+
+ // 11-16 are st(0..5). Not sure why we stop at 5.
+ // These have size 12, which is sizeof(long double) on
+ // platforms with 4-byte alignment for that type.
+ llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
+ AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
+ }
+
+ return false;
+}
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ Class merge(Class Accum, Class Field) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+ /// containing object. Some parameters are classified differently
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
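+ ///
+ /// For example, a hypothetical "struct { long a; double b; }" classifies
+ /// with Lo = Integer (first eightbyte) and Hi = SSE (second eightbyte).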
+ void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const;
+
+ /// getCoerceResult - Given a source type \arg Ty and an LLVM type
+ /// to coerce to, choose the best way to pass Ty in the same place
+ /// that \arg CoerceTo would be passed, but while keeping the
+ /// emitted code as simple as possible.
+ ///
+ /// FIXME: Note, this should be cleaned up to just take an enumeration of all
+ /// the ways we might want to pass things, instead of constructing an LLVM
+ /// type. This makes this code more explicit, and it makes it clearer that we
+ /// are also doing this for correctness in the case of passing scalar types.
+ ABIArgInfo getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const;
+
+ /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
+ /// result such that the argument will be returned in memory.
+ ABIArgInfo getIndirectReturnResult(QualType Ty, ASTContext &Context) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext,
+ unsigned &neededInt,
+ unsigned &neededSSE) const;
+
+public:
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_64TargetCodeGenInfo():TargetCodeGenInfo(new X86_64ABIInfo()) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(Builder, Address, Eight8, 0, 16);
+
+ return false;
+ }
+};
+
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
+ Class Field) const {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
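+ //
+ // For example, merging INTEGER with SSE yields INTEGER (rule (d)), and
+ // merging NO_CLASS with SSE yields SSE (rule (b)).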
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ else if (Field == Memory)
+ return Memory;
+ else if (Accum == NoClass)
+ return Field;
+ else if (Accum == Integer || Field == Integer)
+ return Integer;
+ else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ else
+ return SSE;
+}
+
+void X86_64ABIInfo::classify(QualType Ty,
+ ASTContext &Context,
+ uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ Lo = X87;
+ Hi = X87Up;
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ } else if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
+ } else if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ } else if (Ty->isMemberPointerType()) {
+ if (Ty->isMemberFunctionPointerType())
+ Lo = Hi = Integer;
+ else
+ Current = Integer;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = Context.getTypeSize(VT);
+ if (Size == 32) {
+ // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+ // float> as integer.
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+ if (EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128) {
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType ET = Context.getCanonicalType(CT->getElementType());
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ if (ET->isIntegralType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == Context.FloatTy)
+ Current = SSE;
+ else if (ET == Context.DoubleTy)
+ Lo = Hi = SSE;
+ else if (ET == Context.LongDoubleTy)
+ Current = ComplexX87;
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = Context.getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // Do post-merger cleanup (see below). The only case we worry about is Memory.
+ if (Hi == Memory)
+ Lo = Memory;
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+ // copy constructor or a non-trivial destructor, it is passed by invisible
+ // reference.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+
+ // If this is a C++ record, classify the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+ // single eightbyte, each is classified separately. Each eightbyte gets
+ // initialized to class NO_CLASS.
+ Class FieldLo, FieldHi;
+ uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
+ classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // If this record has no fields but isn't empty, classify as INTEGER.
+ if (RD->field_empty() && Size)
+ Current = Integer;
+ }
+
+ // Classify the fields one at a time, merging the results.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
+ Lo = Memory;
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+ FieldLo = FieldHi = NoClass;
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is MEMORY, the whole argument is
+ // passed in memory.
+ //
+ // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
+
+ // The first of these conditions is guaranteed by how we implement
+ // the merge (just bail).
+ //
+ // The second condition occurs in the case of unions; for example
+ // union { _Complex double; unsigned; }.
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+ }
+}
+
+ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const {
+ if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
+ // Integer and pointer types will end up in a general purpose
+ // register.
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isIntegralType() || Ty->hasPointerRepresentation())
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
+ assert(Ty.isCanonical() && "should always have a canonical type here");
+ assert(!Ty.hasQualifiers() && "should never have a qualified type here");
+
+ // Float and double end up in a single SSE reg.
+ if (Ty == Context.FloatTy || Ty == Context.DoubleTy)
+ return ABIArgInfo::getDirect();
+
+ }
+
+ return ABIArgInfo::getCoerce(CoerceTo);
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty,
+ ASTContext &Context) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ ASTContext &Context) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Compute the byval alignment. We trust the back-end to honor the
+ // minimum ABI alignment for byval, to make cleaner IR.
+ const unsigned MinABIAlign = 8;
+ unsigned Align = Context.getTypeAlign(Ty) / 8;
+ if (Align > MinABIAlign)
+ return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(0);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectReturnResult(RetTy, Context);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = llvm::Type::getInt64Ty(VMContext); break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = llvm::Type::getDoubleTy(VMContext); break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::getX86_FP80Ty(VMContext); break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(VMContext,
+ llvm::Type::getX86_FP80Ty(VMContext),
+ llvm::Type::getX86_FP80Ty(VMContext),
+ NULL);
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ assert(0 && "Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass: break;
+
+ case Integer:
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getInt64Ty(VMContext), NULL);
+ break;
+ case SSE:
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getDoubleTy(VMContext), NULL);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the upper half of the last used SSE register.
+ //
+ // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+ // If X87Up is preceded by X87, we don't need to do
+ // anything. However, in some cases with unions it may not be
+ // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87)
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getDoubleTy(VMContext), NULL);
+ break;
+ }
+
+ return getCoerceResult(RetTy, ResType, Context);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &VMContext,
+ unsigned &neededInt,
+ unsigned &neededSSE) const {
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ return getIndirectResult(Ty, Context);
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+ ResType = llvm::Type::getInt64Ty(VMContext);
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE:
+ ++neededSSE;
+ ResType = llvm::Type::getDoubleTy(VMContext);
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+ // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ assert(0 && "Invalid classification for hi word.");
+ break;
+
+ case NoClass: break;
+ case Integer:
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getInt64Ty(VMContext), NULL);
+ ++neededInt;
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getDoubleTy(VMContext), NULL);
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
+ break;
+ }
+
+ return getCoerceResult(Ty, ResType, Context);
+}
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ Context, VMContext);
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, Context, VMContext,
+ neededInt, neededSSE);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type, Context);
+ }
+ }
+}
+
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) {
+ llvm::Value *overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 8) {
+ // Note that we follow the ABI & gcc here, even though the type
+ // could in theory have an alignment greater than 16. This case
+ // shouldn't ever matter in practice.
+
+ // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+ llvm::Type::getInt64Ty(CGF.getLLVMContext()));
+ llvm::Value *Mask = llvm::ConstantInt::get(
+ llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
+ overflow_arg_area =
+ CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ overflow_arg_area->getType(),
+ "overflow_arg_area.align");
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
+ (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Res;
+}
+
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::LLVMContext &VMContext = CGF.getLLVMContext();
+ const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
+ const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+
+  // Assume the va_list type is correct; it should point to the LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
+ neededInt, neededSSE);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+  // NOTE: 304 is a typo in the ABI document; there are only
+  // (6 * 8 + 8 * 16) = 176 bytes of register save space.
+
+ llvm::Value *InRegs = 0;
+ llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+ llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs =
+ CGF.Builder.CreateICmpULE(gp_offset,
+ llvm::ConstantInt::get(i32Ty,
+ 48 - neededInt * 8),
+ "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ CGF.Builder.CreateICmpULE(fp_offset,
+ llvm::ConstantInt::get(i32Ty,
+ 176 - neededSSE * 16),
+ "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+ "reg_save_area");
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
+ const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ const llvm::Type *TyLo = ST->getElementType(0);
+ const llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFloatingPointTy() ^ TyHi->isFloatingPointTy()) &&
+ "Unexpected ABI info for mixed regs");
+ const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
+ llvm::Value *V =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ } else if (neededInt) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+      // SSE registers are spaced 16 bytes apart in the register save
+      // area, so we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi =
+ CGF.Builder.CreateGEP(RegAddrLo,
+ llvm::ConstantInt::get(i32Ty, 16));
+ const llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(DoubleTy);
+ const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+ DoubleTy, NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ }
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
+ "vaarg.addr");
+ ResAddr->reserveOperandSpace(2);
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+
+ return ResAddr;
+}
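+
+// [Editorial note, not part of the original commit] The struct GEPs above
+// index the SysV x86-64 va_list record, whose C-level layout is:
+//
+//   typedef struct {
+//     unsigned int gp_offset;    // byte offset of next GPR slot (0..48)
+//     unsigned int fp_offset;    // byte offset of next XMM slot (48..176)
+//     void *overflow_arg_area;   // next stack-passed argument
+//     void *reg_save_area;       // block holding the spilled registers
+//   } __va_list_tag;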
+
+// PIC16 ABI Implementation
+
+namespace {
+
+class PIC16ABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PIC16TargetCodeGenInfo():TargetCodeGenInfo(new PIC16ABIInfo()) {}
+};
+
+}
+
+ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ return ABIArgInfo::getDirect();
+}
+
+llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+  const llvm::Type *PTy =
+    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset = CGF.getContext().getTypeSize(Ty) / 8;
+
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
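+
+// [Editorial sketch, not part of the original commit] The PIC16 va_arg above
+// is the plain pointer-bump scheme; a C model of the same behavior:
+//
+//   void *SimpleVAArg(char **AP, unsigned Size) {
+//     void *P = *AP;   // current argument
+//     *AP += Size;     // advance past it; no alignment rounding
+//     return P;
+//   }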
+
+
+// PowerPC-32
+
+namespace {
+class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
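+
+// [Editorial note, not part of the original commit] AssignToArrayRange, a
+// static helper defined earlier in this file, stores the given size byte
+// into Address[First..Last]. The table built here is what the builtin
+// fills in; a hypothetical use:
+//
+//   unsigned char Sizes[114];                    // one byte per DWARF reg
+//   __builtin_init_dwarf_reg_size_table(Sizes);  // Sizes[0..31] == 4, ...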
+
+
+// ARM ABI Implementation
+
+namespace {
+
+class ARMABIInfo : public ABIInfo {
+public:
+ enum ABIKind {
+ APCS = 0,
+ AAPCS = 1,
+ AAPCS_VFP
+ };
+
+private:
+ ABIKind Kind;
+
+public:
+ ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}
+
+private:
+ ABIKind getABIKind() const { return Kind; }
+
+  ABIArgInfo classifyReturnType(QualType RetTy,
+                                ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARMTargetCodeGenInfo(ARMABIInfo::ABIKind K)
+ :TargetCodeGenInfo(new ARMABIInfo(K)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return 13;
+ }
+};
+
+}
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ // ARM always overrides the calling convention.
+ switch (getABIKind()) {
+ case APCS:
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
+ break;
+
+ case AAPCS:
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
+ break;
+
+ case AAPCS_VFP:
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
+ break;
+ }
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(Context, Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
+ // FIXME: This doesn't handle alignment > 64 bits.
+ const llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ if (Context.getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::getInt64Ty(VMContext);
+ SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::getInt32Ty(VMContext);
+ SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
+ }
+ std::vector<const llvm::Type*> LLVMFields;
+ LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
+ const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
+ return ABIArgInfo::getCoerce(STy);
+}
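+
+// [Editorial note, not part of the original commit] For example, a 12-byte,
+// 4-byte-aligned aggregate such as
+//
+//   struct S { int a, b, c; };
+//
+// is coerced above to the packed LLVM type <{ [3 x i32] }>, so the backend
+// can pass it in up to three core registers without byval support.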
+
+static bool isIntegerLikeType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) {
+ // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+ // is called integer-like if its size is less than or equal to one word, and
+ // the offset of each of its addressable sub-fields is zero.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Check that the type fits in a word.
+ if (Size > 32)
+ return false;
+
+ // FIXME: Handle vector types!
+ if (Ty->isVectorType())
+ return false;
+
+ // Float types are never treated as "integer like".
+ if (Ty->isRealFloatingType())
+ return false;
+
+ // If this is a builtin or pointer type then it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+ return true;
+
+ // Small complex integer types are "integer like".
+ if (const ComplexType *CT = Ty->getAs<ComplexType>())
+ return isIntegerLikeType(CT->getElementType(), Context, VMContext);
+
+  // Single-element and zero-sized arrays should be allowed by the definition
+  // above, but they are not.
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Ignore records with flexible arrays.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // Check that all sub-fields are at offset 0, and are themselves "integer
+ // like".
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ bool HadField = false;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const FieldDecl *FD = *i;
+
+ // Bit-fields are not addressable, we only need to verify they are "integer
+ // like". We still have to disallow a subsequent non-bitfield, for example:
+    //   struct { int : 0; int x; }
+ // is non-integer like according to gcc.
+ if (FD->isBitField()) {
+ if (!RD->isUnion())
+ HadField = true;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ continue;
+ }
+
+ // Check if this field is at offset 0.
+ if (Layout.getFieldOffset(idx) != 0)
+ return false;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ // Only allow at most one field in a structure. This doesn't match the
+ // wording above, but follows gcc in situations with a field following an
+ // empty structure.
+ if (!RD->isUnion()) {
+ if (HadField)
+ return false;
+
+ HadField = true;
+ }
+ }
+
+ return true;
+}
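+
+// [Editorial note, not part of the original commit] Hypothetical examples of
+// how the predicate above classifies:
+//
+//   struct A { int x; };             // integer-like: one word, offset 0
+//   union  U { int i; char c; };     // integer-like: both members at 0
+//   struct B { short s; short t; };  // not: t sits at a nonzero offset
+//   struct C { float f; };           // not: floats are never integer-like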
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (!CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Are we following APCS?
+ if (getABIKind() == APCS) {
+ if (isEmptyRecord(Context, RetTy, false))
+ return ABIArgInfo::getIgnore();
+
+ // Complex types are all returned as packed integers.
+ //
+ // FIXME: Consider using 2 x vector types if the back end handles them
+ // correctly.
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(
+ VMContext, Context.getTypeSize(RetTy)));
+
+ // Integer like structures are returned in r0.
+ if (isIntegerLikeType(RetTy, Context, VMContext)) {
+ // Return in the smallest viable integer type.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size <= 8)
+ return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
+ if (Size <= 16)
+ return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
+ return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
+ }
+
+ // Otherwise return in memory.
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Otherwise this is an AAPCS variant.
+
+ if (isEmptyRecord(Context, RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size <= 32) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
+ if (Size <= 16)
+ return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
+ return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
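+
+// [Editorial note, not part of the original commit] Under the AAPCS path
+// above, a 3-byte aggregate such as
+//
+//   struct RGB { unsigned char r, g, b; };
+//
+// has Size == 24, so it is returned coerced to i32 (in r0); any aggregate
+// over 32 bits is returned indirectly through a hidden pointer.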
+
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+  const llvm::Type *PTy =
+    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
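+
+// [Editorial note, not part of the original commit] llvm::RoundUpToAlignment
+// computes ((Value + Align - 1) / Align) * Align, so e.g. a 6-byte type
+// advances ap.cur by 8 bytes here; the minimum va_list slot on ARM is one
+// 4-byte word.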
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+// SystemZ ABI Implementation
+
+namespace {
+
+class SystemZABIInfo : public ABIInfo {
+ bool isPromotableIntegerType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ Context, VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SystemZTargetCodeGenInfo():TargetCodeGenInfo(new SystemZABIInfo()) {}
+};
+
+}
+
+bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
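+
+// [Editorial note, not part of the original commit] Consequently, a
+// hypothetical SystemZ declaration such as
+//
+//   int f(short x);
+//
+// receives x already sign-extended to a full 64-bit register, because
+// classifyArgumentType() below marks promotable types ABIArgInfo::getExtend().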
+
+llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Implement
+ return 0;
+}
+
+
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return (isPromotableIntegerType(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+// MSP430 ABI Implementation
+
+namespace {
+
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MSP430TargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+      // Step 2: Add the 'noinline' attribute.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // Step 3: Emit ISR vector alias.
+ unsigned Num = attr->getNumber() + 0xffe0;
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "vector_" +
+ llvm::LowercaseString(llvm::utohexstr(Num)),
+ GV, &M.getModule());
+ }
+ }
+}
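+
+// [Editorial note, not part of the original commit] The attribute handled
+// above is written in C as, e.g. (vector number hypothetical):
+//
+//   void __attribute__((interrupt(12))) isr(void);
+//
+// for which this hook sets the MSP430_INTR calling convention, marks the
+// function noinline, and emits an alias named "vector_ffec"
+// (0xffe0 + 12 == 0xffec).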
+
+// MIPS ABI Implementation. This works for both little-endian and
+// big-endian variants.
+namespace {
+class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MIPSTargetCodeGenInfo(): TargetCodeGenInfo(new DefaultABIInfo()) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 29;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+}
+
+bool
+MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+  // This information comes from gcc's implementation, which seems to be
+  // as canonical as it gets.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ // Everything on MIPS is 4 bytes. Double-precision FP registers
+ // are aliased to pairs of single-precision FP registers.
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+
+ // 0-31 are the general purpose registers, $0 - $31.
+ // 32-63 are the floating-point registers, $f0 - $f31.
+ // 64 and 65 are the multiply/divide registers, $hi and $lo.
+ // 66 is the (notional, I think) register for signal-handler return.
+ AssignToArrayRange(Builder, Address, Four8, 0, 65);
+
+ // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
+ // They are one bit wide and ignored here.
+
+ // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
+ // (coprocessor 1 is the FP unit)
+ // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
+ // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
+ // 176-181 are the DSP accumulator registers.
+ AssignToArrayRange(Builder, Address, Four8, 80, 181);
+
+ return false;
+}
+
+
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
+ if (TheTargetCodeGenInfo)
+ return *TheTargetCodeGenInfo;
+
+ // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
+ // free it.
+
+ const llvm::Triple &Triple(getContext().Target.getTriple());
+ switch (Triple.getArch()) {
+ default:
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo);
+
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo());
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // FIXME: We want to know the float calling convention as well.
+ if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
+ return *(TheTargetCodeGenInfo =
+ new ARMTargetCodeGenInfo(ARMABIInfo::APCS));
+
+ return *(TheTargetCodeGenInfo =
+ new ARMTargetCodeGenInfo(ARMABIInfo::AAPCS));
+
+ case llvm::Triple::pic16:
+ return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo());
+
+ case llvm::Triple::ppc:
+ return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo());
+
+ case llvm::Triple::systemz:
+ return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo());
+
+ case llvm::Triple::msp430:
+ return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo());
+
+ case llvm::Triple::x86:
+ switch (Triple.getOS()) {
+ case llvm::Triple::Darwin:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Context, true, true));
+ case llvm::Triple::Cygwin:
+ case llvm::Triple::MinGW32:
+ case llvm::Triple::MinGW64:
+ case llvm::Triple::AuroraUX:
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::OpenBSD:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Context, false, true));
+
+ default:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Context, false, false));
+ }
+
+ case llvm::Triple::x86_64:
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo());
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
new file mode 100644
index 0000000..f0a7824
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
@@ -0,0 +1,98 @@
+//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_TARGETINFO_H
+#define CLANG_CODEGEN_TARGETINFO_H
+
+namespace llvm {
+ class GlobalValue;
+ class Value;
+}
+
+namespace clang {
+ class ABIInfo;
+ class Decl;
+
+ namespace CodeGen {
+ class CodeGenModule;
+ class CodeGenFunction;
+ }
+
+  /// TargetCodeGenInfo - This class organizes various target-specific
+  /// code-generation issues, such as target-specific attributes, builtins,
+  /// and so on.
+ class TargetCodeGenInfo {
+ ABIInfo *Info;
+ public:
+    // WARNING: Takes ownership of the ABIInfo.
+ TargetCodeGenInfo(ABIInfo *info = 0):Info(info) { }
+ virtual ~TargetCodeGenInfo();
+
+ /// getABIInfo() - Returns ABI info helper for the target.
+ const ABIInfo& getABIInfo() const { return *Info; }
+
+ /// SetTargetAttributes - Provides a convenient hook to handle extra
+ /// target-specific attributes for the given global.
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const { }
+
+ /// Controls whether __builtin_extend_pointer should sign-extend
+ /// pointers to uint64_t or zero-extend them (the default). Has
+ /// no effect for targets:
+ /// - that have 64-bit pointers, or
+ /// - that cannot address through registers larger than pointers, or
+ /// - that implicitly ignore/truncate the top bits when addressing
+ /// through such registers.
+ virtual bool extendPointerWithSExt() const { return false; }
+
+ /// Determines the DWARF register number for the stack pointer, for
+ /// exception-handling purposes. Implements __builtin_dwarf_sp_column.
+ ///
+ /// Returns -1 if the operation is unsupported by this target.
+ virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return -1;
+ }
+
+ /// Initializes the given DWARF EH register-size table, a char*.
+ /// Implements __builtin_init_dwarf_reg_size_table.
+ ///
+ /// Returns true if the operation is unsupported by this target.
+ virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return true;
+ }
+
+ /// Performs the code-generation required to convert a return
+ /// address as stored by the system into the actual address of the
+ /// next instruction that will be executed.
+ ///
+ /// Used by __builtin_extract_return_addr().
+ virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ /// Performs the code-generation required to convert the address
+ /// of an instruction into a return address suitable for storage
+ /// by the system in a return slot.
+ ///
+ /// Used by __builtin_frob_return_addr().
+ virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+ };
+}
+
+#endif // CLANG_CODEGEN_TARGETINFO_H
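+
+// [Editorial sketch, not part of the original commit] A hypothetical minimal
+// subclass, following the pattern used by the targets in TargetInfo.cpp
+// (DefaultABIInfo lives there; ownership passes to the base):
+//
+//   class MyTargetCodeGenInfo : public clang::TargetCodeGenInfo {
+//   public:
+//     MyTargetCodeGenInfo() : TargetCodeGenInfo(new DefaultABIInfo()) {}
+//     virtual int
+//     getDwarfEHStackPointer(clang::CodeGen::CodeGenModule &M) const {
+//       return 4;  // hypothetical DWARF number of the stack pointer
+//     }
+//   };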