Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h              133
-rw-r--r--  lib/CodeGen/CGBlocks.cpp          1037
-rw-r--r--  lib/CodeGen/CGBlocks.h             223
-rw-r--r--  lib/CodeGen/CGBuilder.h             26
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp         1037
-rw-r--r--  lib/CodeGen/CGCXX.cpp              454
-rw-r--r--  lib/CodeGen/CGCXX.h                 36
-rw-r--r--  lib/CodeGen/CGCall.cpp            2196
-rw-r--r--  lib/CodeGen/CGCall.h               104
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp        987
-rw-r--r--  lib/CodeGen/CGDebugInfo.h          126
-rw-r--r--  lib/CodeGen/CGDecl.cpp             489
-rw-r--r--  lib/CodeGen/CGExpr.cpp            1324
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp          554
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp      663
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp     588
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp      1575
-rw-r--r--  lib/CodeGen/CGObjC.cpp             644
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp         1582
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp         5780
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h        206
-rw-r--r--  lib/CodeGen/CGStmt.cpp            1022
-rw-r--r--  lib/CodeGen/CGValue.h              323
-rw-r--r--  lib/CodeGen/CMakeLists.txt          24
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp    714
-rw-r--r--  lib/CodeGen/CodeGenFunction.h      900
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp     1543
-rw-r--r--  lib/CodeGen/CodeGenModule.h        467
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp       614
-rw-r--r--  lib/CodeGen/CodeGenTypes.h         212
-rw-r--r--  lib/CodeGen/Makefile                23
-rw-r--r--  lib/CodeGen/Mangle.cpp             772
-rw-r--r--  lib/CodeGen/Mangle.h                44
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp      100
-rw-r--r--  lib/CodeGen/README.txt              65
35 files changed, 26587 insertions(+), 0 deletions(-)
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
new file mode 100644
index 0000000..3de4612
--- /dev/null
+++ b/lib/CodeGen/ABIInfo.h
@@ -0,0 +1,133 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_ABIINFO_H
+#define CLANG_CODEGEN_ABIINFO_H
+
+namespace llvm {
+ class Type;
+}
+
+namespace clang {
+ class ASTContext;
+
+ // FIXME: This is a layering issue if we want to move ABIInfo
+ // down. Fortunately CGFunctionInfo has no real tie to CodeGen.
+ namespace CodeGen {
+ class CGFunctionInfo;
+ class CodeGenFunction;
+ }
+
+ /* FIXME: All of this stuff should be part of the target interface
+ somehow. It is currently here because it is not clear how to factor
+ the targets to support this, since the Targets currently live in a
+     layer below the type system.
+ */
+
+ /// ABIArgInfo - Helper class to encapsulate information about how a
+ /// specific C type should be passed to or returned from a function.
+ class ABIArgInfo {
+ public:
+ enum Kind {
+ Direct, /// Pass the argument directly using the normal
+ /// converted LLVM type. Complex and structure types
+ /// are passed using first class aggregates.
+
+ Indirect, /// Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default
+ /// alignment).
+
+ Ignore, /// Ignore the argument (treat as void). Useful for
+ /// void and empty structs.
+
+ Coerce, /// Only valid for aggregate return types, the argument
+ /// should be accessed by coercion to a provided type.
+
+ Expand, /// Only valid for aggregate argument types. The
+ /// structure should be expanded into consecutive
+ /// arguments for its constituent fields. Currently
+ /// expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable
+ /// types.
+
+ KindFirst=Direct, KindLast=Expand
+ };
+
+ private:
+ Kind TheKind;
+ const llvm::Type *TypeData;
+ unsigned UIntData;
+
+ ABIArgInfo(Kind K, const llvm::Type *TD=0,
+ unsigned UI=0) : TheKind(K),
+ TypeData(TD),
+ UIntData(UI) {}
+ public:
+ ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
+
+ static ABIArgInfo getDirect() {
+ return ABIArgInfo(Direct);
+ }
+ static ABIArgInfo getIgnore() {
+ return ABIArgInfo(Ignore);
+ }
+ static ABIArgInfo getCoerce(const llvm::Type *T) {
+ return ABIArgInfo(Coerce, T);
+ }
+ static ABIArgInfo getIndirect(unsigned Alignment) {
+ return ABIArgInfo(Indirect, 0, Alignment);
+ }
+ static ABIArgInfo getExpand() {
+ return ABIArgInfo(Expand);
+ }
+
+ Kind getKind() const { return TheKind; }
+ bool isDirect() const { return TheKind == Direct; }
+ bool isIgnore() const { return TheKind == Ignore; }
+ bool isCoerce() const { return TheKind == Coerce; }
+ bool isIndirect() const { return TheKind == Indirect; }
+ bool isExpand() const { return TheKind == Expand; }
+
+ // Coerce accessors
+ const llvm::Type *getCoerceToType() const {
+ assert(TheKind == Coerce && "Invalid kind!");
+ return TypeData;
+ }
+
+ // ByVal accessors
+ unsigned getIndirectAlign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return UIntData;
+ }
+
+ void dump() const;
+ };
+
+ /// ABIInfo - Target specific hooks for defining how a type should be
+ /// passed or returned from functions.
+ class ABIInfo {
+ public:
+ virtual ~ABIInfo();
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
+ ASTContext &Ctx) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGen::CodeGenFunction &CGF) const = 0;
+ };
+} // end namespace clang
+
+#endif
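
Not part of the commit, but useful orientation: a minimal sketch of how a
target-specific classifier might use the ABIArgInfo factories above. The
classifyType helper, its rules, and the extra include are illustrative
assumptions; real targets implement computeInfo according to the platform ABI.

    // Hypothetical sketch; assumes clang/AST/Type.h for the QualType queries.
    #include "clang/AST/Type.h"

    static clang::ABIArgInfo classifyType(clang::QualType Ty) {
      if (Ty->isVoidType())
        return clang::ABIArgInfo::getIgnore();     // nothing passed for void
      if (Ty->isStructureType())
        return clang::ABIArgInfo::getIndirect(0);  // hidden pointer, default align
      return clang::ABIArgInfo::getDirect();       // use the converted LLVM type
    }

A real computeInfo would apply such a classifier to the return type and to
each argument recorded in the CGFunctionInfo.
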
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
new file mode 100644
index 0000000..ead689c
--- /dev/null
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -0,0 +1,1037 @@
+//===--- CGBlocks.cpp - Emit LLVM Code for blocks -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include <algorithm>
+using namespace clang;
+using namespace CodeGen;
+
+llvm::Constant *CodeGenFunction::
+BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
+ const llvm::StructType* Ty,
+ std::vector<HelperInfo> *NoteForHelper) {
+ const llvm::Type *UnsignedLongTy
+ = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *C;
+ std::vector<llvm::Constant*> Elts;
+
+ // reserved
+ C = llvm::ConstantInt::get(UnsignedLongTy, 0);
+ Elts.push_back(C);
+
+ // Size
+ // FIXME: What is the right way to say this doesn't fit? We should give
+  // a user diagnostic in that case. A better fix would be to change the
+ // API to size_t.
+ C = llvm::ConstantInt::get(UnsignedLongTy, Size);
+ Elts.push_back(C);
+
+ if (BlockHasCopyDispose) {
+ // copy_func_helper_decl
+ Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
+
+ // destroy_func_decl
+ Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
+ }
+
+ C = llvm::ConstantStruct::get(Elts);
+
+ C = new llvm::GlobalVariable(C->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ C, "__block_descriptor_tmp", &CGM.getModule());
+ return C;
+}
+
+llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock == 0)
+ NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ "_NSConcreteGlobalBlock");
+ return NSConcreteGlobalBlock;
+}
+
+llvm::Constant *BlockModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock == 0)
+ NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ "_NSConcreteStackBlock");
+ return NSConcreteStackBlock;
+}
+
+static void CollectBlockDeclRefInfo(const Stmt *S,
+ CodeGenFunction::BlockInfo &Info) {
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I != E; ++I)
+ if (*I)
+ CollectBlockDeclRefInfo(*I, Info);
+
+ if (const BlockDeclRefExpr *DE = dyn_cast<BlockDeclRefExpr>(S)) {
+ // FIXME: Handle enums.
+ if (isa<FunctionDecl>(DE->getDecl()))
+ return;
+
+ if (DE->isByRef())
+ Info.ByRefDeclRefs.push_back(DE);
+ else
+ Info.ByCopyDeclRefs.push_back(DE);
+ }
+}
+
+/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
+/// declared as a global variable instead of on the stack.
+static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
+ return Info.ByRefDeclRefs.empty() && Info.ByCopyDeclRefs.empty();
+}
+
+// FIXME: Push most into CGM, passing down a few bits, like current function
+// name.
+llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
+
+ std::string Name = CurFn->getName();
+ CodeGenFunction::BlockInfo Info(0, Name.c_str());
+ CollectBlockDeclRefInfo(BE->getBody(), Info);
+
+ // Check if the block can be global.
+ // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
+ // to just have one code path. We should move this function into CGM and pass
+ // CGF, then we can just check to see if CGF is 0.
+ if (0 && CanBlockBeGlobal(Info))
+ return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());
+
+ std::vector<llvm::Constant*> Elts(5);
+ llvm::Constant *C;
+ llvm::Value *V;
+
+ {
+ // C = BuildBlockStructInitlist();
+ unsigned int flags = BLOCK_HAS_DESCRIPTOR;
+
+ // We run this first so that we set BlockHasCopyDispose from the entire
+ // block literal.
+ // __invoke
+ uint64_t subBlockSize, subBlockAlign;
+ llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
+ bool subBlockHasCopyDispose = false;
+ llvm::Function *Fn
+ = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl, LocalDeclMap,
+ subBlockSize,
+ subBlockAlign,
+ subBlockDeclRefDecls,
+ subBlockHasCopyDispose);
+ BlockHasCopyDispose |= subBlockHasCopyDispose;
+ Elts[3] = Fn;
+
+    // FIXME: Don't use BlockHasCopyDispose, it is set more often than
+ // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
+ if (subBlockHasCopyDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+
+ // __isa
+ C = CGM.getNSConcreteStackBlock();
+ C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
+ Elts[0] = C;
+
+ // __flags
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ C = llvm::ConstantInt::get(IntTy, flags);
+ Elts[1] = C;
+
+ // __reserved
+ C = llvm::ConstantInt::get(IntTy, 0);
+ Elts[2] = C;
+
+ if (subBlockDeclRefDecls.size() == 0) {
+ // __descriptor
+ Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize, 0, 0);
+
+ // Optimize to being a global block.
+ Elts[0] = CGM.getNSConcreteGlobalBlock();
+ Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
+
+ C = llvm::ConstantStruct::get(Elts);
+
+ char Name[32];
+ sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount());
+ C = new llvm::GlobalVariable(C->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ C, Name, &CGM.getModule());
+ QualType BPT = BE->getType();
+ C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
+ return C;
+ }
+
+ std::vector<const llvm::Type *> Types(5+subBlockDeclRefDecls.size());
+ for (int i=0; i<4; ++i)
+ Types[i] = Elts[i]->getType();
+ Types[4] = PtrToInt8Ty;
+
+ for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
+ const Expr *E = subBlockDeclRefDecls[i];
+ const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+ QualType Ty = E->getType();
+ if (BDRE && BDRE->isByRef()) {
+ uint64_t Align = getContext().getDeclAlignInBytes(BDRE->getDecl());
+ Types[i+5] = llvm::PointerType::get(BuildByRefType(Ty, Align), 0);
+ } else
+ Types[i+5] = ConvertType(Ty);
+ }
+
+ llvm::StructType *Ty = llvm::StructType::get(Types, true);
+
+ llvm::AllocaInst *A = CreateTempAlloca(Ty);
+ A->setAlignment(subBlockAlign);
+ V = A;
+
+ std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size());
+ int helpersize = 0;
+
+ for (unsigned i=0; i<4; ++i)
+ Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
+
+ for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
+ {
+ // FIXME: Push const down.
+ Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]);
+ DeclRefExpr *DR;
+ ValueDecl *VD;
+
+ DR = dyn_cast<DeclRefExpr>(E);
+ // Skip padding.
+ if (DR) continue;
+
+ BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+ VD = BDRE->getDecl();
+
+ llvm::Value* Addr = Builder.CreateStructGEP(V, i+5, "tmp");
+ NoteForHelper[helpersize].index = i+5;
+ NoteForHelper[helpersize].RequiresCopying = BlockRequiresCopying(VD->getType());
+ NoteForHelper[helpersize].flag
+ = VD->getType()->isBlockPointerType() ? BLOCK_FIELD_IS_BLOCK : BLOCK_FIELD_IS_OBJECT;
+
+ if (LocalDeclMap[VD]) {
+ if (BDRE->isByRef()) {
+ NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
+ // FIXME: Someone double check this.
+ (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
+ const llvm::Type *Ty = Types[i+5];
+ llvm::Value *Loc = LocalDeclMap[VD];
+ Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc, false);
+ Loc = Builder.CreateBitCast(Loc, Ty);
+ Builder.CreateStore(Loc, Addr);
+ ++helpersize;
+ continue;
+ } else
+ E = new (getContext()) DeclRefExpr (cast<NamedDecl>(VD),
+ VD->getType(), SourceLocation(),
+ false, false);
+ }
+ if (BDRE->isByRef()) {
+ NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
+ // FIXME: Someone double check this.
+ (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
+ E = new (getContext())
+ UnaryOperator(E, UnaryOperator::AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+ ++helpersize;
+
+ RValue r = EmitAnyExpr(E, Addr, false);
+ if (r.isScalar()) {
+ llvm::Value *Loc = r.getScalarVal();
+ const llvm::Type *Ty = Types[i+5];
+ if (BDRE->isByRef()) {
+          // E is now the address of the value field; instead, we want the
+ // address of the actual ByRef struct. We optimize this slightly
+ // compared to gcc by not grabbing the forwarding slot as this must
+ // be done during Block_copy for us, and we can postpone the work
+ // until then.
+ uint64_t offset = BlockDecls[BDRE->getDecl()];
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+
+ Loc = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(llvm::Type::Int64Ty,
+ offset),
+ "block.literal");
+ Ty = llvm::PointerType::get(Ty, 0);
+ Loc = Builder.CreateBitCast(Loc, Ty);
+ Loc = Builder.CreateLoad(Loc, false);
+ // Loc = Builder.CreateBitCast(Loc, Ty);
+ }
+ Builder.CreateStore(Loc, Addr);
+ } else if (r.isComplex())
+ // FIXME: implement
+ ErrorUnsupported(BE, "complex in block literal");
+ else if (r.isAggregate())
+ ; // Already created into the destination
+ else
+ assert (0 && "bad block variable");
+ // FIXME: Ensure that the offset created by the backend for
+ // the struct matches the previously computed offset in BlockDecls.
+ }
+ NoteForHelper.resize(helpersize);
+
+ // __descriptor
+ llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose,
+ subBlockSize, Ty,
+ &NoteForHelper);
+ Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
+ Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
+ }
+
+ QualType BPT = BE->getType();
+ return Builder.CreateBitCast(V, ConvertType(BPT));
+}
+
+
+const llvm::Type *BlockModule::getBlockDescriptorType() {
+ if (BlockDescriptorType)
+ return BlockDescriptorType;
+
+ const llvm::Type *UnsignedLongTy =
+ getTypes().ConvertType(getContext().UnsignedLongTy);
+
+ // struct __block_descriptor {
+ // unsigned long reserved;
+ // unsigned long block_size;
+ // };
+ BlockDescriptorType = llvm::StructType::get(UnsignedLongTy,
+ UnsignedLongTy,
+ NULL);
+
+ getModule().addTypeName("struct.__block_descriptor",
+ BlockDescriptorType);
+
+ return BlockDescriptorType;
+}
+
+const llvm::Type *BlockModule::getGenericBlockLiteralType() {
+ if (GenericBlockLiteralType)
+ return GenericBlockLiteralType;
+
+ const llvm::Type *BlockDescPtrTy =
+ llvm::PointerType::getUnqual(getBlockDescriptorType());
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
+ IntTy,
+ IntTy,
+ PtrToInt8Ty,
+ BlockDescPtrTy,
+ NULL);
+
+ getModule().addTypeName("struct.__block_literal_generic",
+ GenericBlockLiteralType);
+
+ return GenericBlockLiteralType;
+}
+
+const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
+ if (GenericExtendedBlockLiteralType)
+ return GenericExtendedBlockLiteralType;
+
+ const llvm::Type *BlockDescPtrTy =
+ llvm::PointerType::getUnqual(getBlockDescriptorType());
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // void *__copy_func_helper_decl;
+ // void *__destroy_func_decl;
+ // };
+ GenericExtendedBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
+ IntTy,
+ IntTy,
+ PtrToInt8Ty,
+ BlockDescPtrTy,
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ NULL);
+
+ getModule().addTypeName("struct.__block_literal_extended_generic",
+ GenericExtendedBlockLiteralType);
+
+ return GenericExtendedBlockLiteralType;
+}
+
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
+ const BlockPointerType *BPT =
+ E->getCallee()->getType()->getAsBlockPointerType();
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+
+ // Get a pointer to the generic block literal.
+ const llvm::Type *BlockLiteralTy =
+ llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+
+ // Bitcast the callee to a block literal.
+ llvm::Value *BlockLiteral =
+ Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
+
+ BlockLiteral =
+ Builder.CreateBitCast(BlockLiteral,
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty),
+ "tmp");
+
+ // Add the block literal.
+ QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));
+
+ QualType FnType = BPT->getPointeeType();
+
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAsFunctionProtoType(),
+ E->arg_begin(), E->arg_end());
+
+ // Load the function.
+ llvm::Value *Func = Builder.CreateLoad(FuncPtr, false, "tmp");
+
+ QualType ResultType = FnType->getAsFunctionType()->getResultType();
+
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().getFunctionInfo(ResultType, Args);
+
+ // Cast the function pointer to the right type.
+ const llvm::Type *BlockFTy =
+ CGM.getTypes().GetFunctionType(FnInfo, false);
+
+ const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+
+ // And call the block.
+ return EmitCall(FnInfo, Func, Args);
+}
+
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
+ uint64_t &offset = BlockDecls[E->getDecl()];
+
+ const llvm::Type *Ty;
+ Ty = CGM.getTypes().ConvertType(E->getDecl()->getType());
+
+ // See if we have already allocated an offset for this variable.
+ if (offset == 0) {
+ // Don't run the expensive check, unless we have to.
+ if (!BlockHasCopyDispose && BlockRequiresCopying(E->getType()))
+ BlockHasCopyDispose = true;
+ // if not, allocate one now.
+ offset = getBlockOffset(E);
+ }
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+ llvm::Value *V = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(llvm::Type::Int64Ty,
+ offset),
+ "block.literal");
+ if (E->isByRef()) {
+ bool needsCopyDispose = BlockRequiresCopying(E->getType());
+ uint64_t Align = getContext().getDeclAlignInBytes(E->getDecl());
+ const llvm::Type *PtrStructTy
+ = llvm::PointerType::get(BuildByRefType(E->getType(), Align), 0);
+ // The block literal will need a copy/destroy helper.
+ BlockHasCopyDispose = true;
+ Ty = PtrStructTy;
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateBitCast(V, Ty);
+ V = Builder.CreateLoad(V, false);
+ V = Builder.CreateStructGEP(V, 1, "forwarding");
+ V = Builder.CreateLoad(V, false);
+ V = Builder.CreateBitCast(V, PtrStructTy);
+ V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
+ } else {
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateBitCast(V, Ty);
+ }
+ return V;
+}
+
+void CodeGenFunction::BlockForwardSelf() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
+ llvm::Value *&DMEntry = LocalDeclMap[SelfDecl];
+ if (DMEntry)
+ return;
+ // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care
+ BlockDeclRefExpr *BDRE = new (getContext())
+ BlockDeclRefExpr(SelfDecl,
+ SelfDecl->getType(), SourceLocation(), false);
+ DMEntry = GetAddrOfBlockDecl(BDRE);
+}
+
+llvm::Constant *
+BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
+ // Generate the block descriptor.
+ const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ llvm::Constant *DescriptorFields[2];
+
+ // Reserved
+ DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
+
+ // Block literal size. For global blocks we just use the size of the generic
+ // block literal struct.
+ uint64_t BlockLiteralSize =
+ TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8;
+ DescriptorFields[1] = llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);
+
+ llvm::Constant *DescriptorStruct =
+ llvm::ConstantStruct::get(&DescriptorFields[0], 2);
+
+ llvm::GlobalVariable *Descriptor =
+ new llvm::GlobalVariable(DescriptorStruct->getType(), true,
+ llvm::GlobalVariable::InternalLinkage,
+ DescriptorStruct, "__block_descriptor_global",
+ &getModule());
+
+ // Generate the constants for the block literal.
+ llvm::Constant *LiteralFields[5];
+
+ CodeGenFunction::BlockInfo Info(0, n);
+ uint64_t subBlockSize, subBlockAlign;
+ llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
+ bool subBlockHasCopyDispose = false;
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ llvm::Function *Fn
+ = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap,
+ subBlockSize,
+ subBlockAlign,
+ subBlockDeclRefDecls,
+ subBlockHasCopyDispose);
+ assert(subBlockSize == BlockLiteralSize
+ && "no imports allowed for global block");
+
+ // isa
+ LiteralFields[0] = getNSConcreteGlobalBlock();
+
+ // Flags
+ LiteralFields[1] =
+ llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);
+
+ // Reserved
+ LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
+
+ // Function
+ LiteralFields[3] = Fn;
+
+ // Descriptor
+ LiteralFields[4] = Descriptor;
+
+ llvm::Constant *BlockLiteralStruct =
+ llvm::ConstantStruct::get(&LiteralFields[0], 5);
+
+ llvm::GlobalVariable *BlockLiteral =
+ new llvm::GlobalVariable(BlockLiteralStruct->getType(), true,
+ llvm::GlobalVariable::InternalLinkage,
+ BlockLiteralStruct, "__block_literal_global",
+ &getModule());
+
+ return BlockLiteral;
+}
+
+llvm::Value *CodeGenFunction::LoadBlockStruct() {
+ return Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()], "self");
+}
+
+llvm::Function *
+CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
+ const BlockInfo& Info,
+ const Decl *OuterFuncDecl,
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm,
+ uint64_t &Size,
+ uint64_t &Align,
+ llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
+ bool &subBlockHasCopyDispose) {
+
+ // Check if we should generate debug info for this block.
+ if (CGM.getDebugInfo())
+ DebugInfo = CGM.getDebugInfo();
+
+ // Arrange for local static and local extern declarations to appear
+ // to be local to this function as well, as they are directly referenced
+ // in a block.
+ for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
+ i != ldm.end();
+ ++i) {
+ const VarDecl *VD = dyn_cast<VarDecl>(i->first);
+
+ if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
+ LocalDeclMap[VD] = i->second;
+ }
+
+ // FIXME: We need to rearrange the code for copy/dispose so we have this
+ // sooner, so we can calculate offsets correctly.
+ if (!BlockHasCopyDispose)
+ BlockOffset = CGM.getTargetData()
+ .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8;
+ else
+ BlockOffset = CGM.getTargetData()
+ .getTypeStoreSizeInBits(CGM.getGenericExtendedBlockLiteralType()) / 8;
+ BlockAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
+
+ const FunctionType *BlockFunctionType = BExpr->getFunctionType();
+ QualType ResultType;
+ bool IsVariadic;
+ if (const FunctionProtoType *FTy =
+ dyn_cast<FunctionProtoType>(BlockFunctionType)) {
+ ResultType = FTy->getResultType();
+ IsVariadic = FTy->isVariadic();
+ }
+ else {
+ // K&R style block.
+ ResultType = BlockFunctionType->getResultType();
+ IsVariadic = false;
+ }
+
+ FunctionArgList Args;
+
+ const BlockDecl *BD = BExpr->getBlockDecl();
+
+ // FIXME: This leaks
+ ImplicitParamDecl *SelfDecl =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
+ BlockStructDecl = SelfDecl;
+
+ for (BlockDecl::param_const_iterator i = BD->param_begin(),
+ e = BD->param_end(); i != e; ++i)
+ Args.push_back(std::make_pair(*i, (*i)->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(ResultType, Args);
+
+ std::string Name = std::string("__") + Info.Name + "_block_invoke_";
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ CGM.SetInternalFunctionAttributes(BD, Fn, FI);
+
+ StartFunction(BD, ResultType, Fn, Args,
+ BExpr->getBody()->getLocEnd());
+ CurFuncDecl = OuterFuncDecl;
+ CurCodeDecl = BD;
+ EmitStmt(BExpr->getBody());
+ FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
+
+ // The runtime needs a minimum alignment of a void *.
+ uint64_t MinAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
+ BlockOffset = llvm::RoundUpToAlignment(BlockOffset, MinAlign);
+
+ Size = BlockOffset;
+ Align = BlockAlign;
+ subBlockDeclRefDecls = BlockDeclRefDecls;
+ subBlockHasCopyDispose |= BlockHasCopyDispose;
+ return Fn;
+}
+
+uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
+ const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());
+
+ uint64_t Size = getContext().getTypeSize(D->getType()) / 8;
+ uint64_t Align = getContext().getDeclAlignInBytes(D);
+
+ if (BDRE->isByRef()) {
+ Size = getContext().getTypeSize(getContext().VoidPtrTy) / 8;
+ Align = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
+ }
+
+ assert ((Align > 0) && "alignment must be 1 byte or more");
+
+ uint64_t OldOffset = BlockOffset;
+
+  // Ensure proper alignment, even if it means we must leave a gap
+ BlockOffset = llvm::RoundUpToAlignment(BlockOffset, Align);
+ BlockAlign = std::max(Align, BlockAlign);
+
+ uint64_t Pad = BlockOffset - OldOffset;
+ if (Pad) {
+ llvm::ArrayType::get(llvm::Type::Int8Ty, Pad);
+ QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
+ llvm::APInt(32, Pad),
+ ArrayType::Normal, 0);
+ ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
+ 0, QualType(PadTy), VarDecl::None,
+ SourceLocation());
+ Expr *E;
+ E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
+ SourceLocation(), false, false);
+ BlockDeclRefDecls.push_back(E);
+ }
+ BlockDeclRefDecls.push_back(BDRE);
+
+ BlockOffset += Size;
+ return BlockOffset-Size;
+}
+
+llvm::Constant *BlockFunction::
+GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args);
+
+ std::string Name = std::string("__copy_helper_block_");
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R,
+ FunctionDecl::Static, false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+ llvm::Type *PtrPtrT;
+
+ if (NoteForHelperp) {
+ std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+ SrcObj = Builder.CreateLoad(SrcObj);
+
+ llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
+ llvm::Type *PtrPtrT;
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
+ DstObj = Builder.CreateLoad(DstObj);
+
+ for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+ int flag = NoteForHelper[i].flag;
+ int index = NoteForHelper[i].index;
+
+ if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+ || NoteForHelper[i].RequiresCopying) {
+ llvm::Value *Srcv = SrcObj;
+ Srcv = Builder.CreateStructGEP(Srcv, index);
+ Srcv = Builder.CreateBitCast(Srcv,
+ llvm::PointerType::get(PtrToInt8Ty, 0));
+ Srcv = Builder.CreateLoad(Srcv);
+
+ llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
+ Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
+
+ llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ llvm::Value *F = getBlockObjectAssign();
+ Builder.CreateCall3(F, Dstv, Srcv, N);
+ }
+ }
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::
+GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
+ const llvm::StructType* T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args);
+
+ std::string Name = std::string("__destroy_helper_block_");
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__destroy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R,
+ FunctionDecl::Static, false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ if (NoteForHelperp) {
+ std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+ llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+ llvm::Type *PtrPtrT;
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+ SrcObj = Builder.CreateLoad(SrcObj);
+
+ for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+ int flag = NoteForHelper[i].flag;
+ int index = NoteForHelper[i].index;
+
+ if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+ || NoteForHelper[i].RequiresCopying) {
+ llvm::Value *Srcv = SrcObj;
+ Srcv = Builder.CreateStructGEP(Srcv, index);
+ Srcv = Builder.CreateBitCast(Srcv,
+ llvm::PointerType::get(PtrToInt8Ty, 0));
+ Srcv = Builder.CreateLoad(Srcv);
+
+ BuildBlockRelease(Srcv, flag);
+ }
+ }
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelper) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
+ T, NoteForHelper);
+}
+
+llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
+ T, NoteForHelperp);
+}
+
+llvm::Constant *BlockFunction::
+GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args);
+
+ std::string Name = std::string("__Block_byref_id_object_copy_");
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R,
+ FunctionDecl::Static, false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ // dst->x
+ llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
+
+ // src->x
+ V = CGF.GetAddrOfLocalVar(Src);
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateBitCast(V, T);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
+ llvm::Value *SrcObj = Builder.CreateLoad(V);
+
+ flag |= BLOCK_BYREF_CALLER;
+
+ llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ llvm::Value *F = getBlockObjectAssign();
+ Builder.CreateCall3(F, DstObj, SrcObj, N);
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *
+BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
+ int flag) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args);
+
+ std::string Name = std::string("__Block_byref_id_object_dispose_");
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R,
+ FunctionDecl::Static, false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
+ V = Builder.CreateLoad(V);
+
+ flag |= BLOCK_BYREF_CALLER;
+ BuildBlockRelease(V, flag);
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
+ int flag) {
+ return CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
+ int flag) {
+ return CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag);
+}
+
+llvm::Value *BlockFunction::getBlockObjectDispose() {
+ if (CGM.BlockObjectDispose == 0) {
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::VoidTy;
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::Int32Ty);
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ CGM.BlockObjectDispose
+ = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
+ }
+ return CGM.BlockObjectDispose;
+}
+
+llvm::Value *BlockFunction::getBlockObjectAssign() {
+ if (CGM.BlockObjectAssign == 0) {
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::VoidTy;
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::Int32Ty);
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ CGM.BlockObjectAssign
+ = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
+ }
+ return CGM.BlockObjectAssign;
+}
+
+void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
+ llvm::Value *F = getBlockObjectDispose();
+ llvm::Value *N;
+ V = Builder.CreateBitCast(V, PtrToInt8Ty);
+ N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ Builder.CreateCall2(F, V, N);
+}
+
+ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
+
+BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
+ CGBuilderTy &B)
+ : CGM(cgm), CGF(cgf), Builder(B) {
+ PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+
+ BlockHasCopyDispose = false;
+}
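
For readers, and not part of the commit: the runtime layouts this file emits,
transcribed from the struct comments in getBlockDescriptorType and
getGenericBlockLiteralType, followed by a source-level rendering of the call
sequence EmitBlockCallExpr generates (blk and its argument are hypothetical).

    /* Transcribed from the comments above; illustrative only. */
    struct __block_descriptor {
      unsigned long reserved;
      unsigned long block_size;
      /* with BLOCK_HAS_COPY_DISPOSE, copy and dispose helper pointers follow */
    };
    struct __block_literal_generic {
      void *__isa;              /* _NSConcreteStackBlock or _NSConcreteGlobalBlock */
      int __flags;
      int __reserved;
      void (*__invoke)(void *);
      struct __block_descriptor *__descriptor;
      /* captured variables are laid out after these five fields */
    };

    /* What EmitBlockCallExpr generates for a call such as blk(41): bitcast to
       the generic literal, load __invoke, and pass the literal itself as the
       hidden first argument. */
    struct __block_literal_generic *lit = (struct __block_literal_generic *)blk;
    int result = ((int (*)(void *, int))lit->__invoke)((void *)lit, 41);
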
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
new file mode 100644
index 0000000..56d3a2d
--- /dev/null
+++ b/lib/CodeGen/CGBlocks.h
@@ -0,0 +1,223 @@
+//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for LLVM translation of block literals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBLOCKS_H
+#define CLANG_CODEGEN_CGBLOCKS_H
+
+#include "CodeGenTypes.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+#include <vector>
+#include <map>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class Value;
+}
+
+namespace clang {
+
+namespace CodeGen {
+class CodeGenModule;
+
+class BlockBase {
+public:
+ enum {
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GC = (1 << 27),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_HAS_DESCRIPTOR = (1 << 29)
+ };
+};
+
+class BlockModule : public BlockBase {
+ ASTContext &Context;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ CodeGenTypes &Types;
+ CodeGenModule &CGM;
+
+ ASTContext &getContext() const { return Context; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+public:
+ llvm::Constant *getNSConcreteGlobalBlock();
+ llvm::Constant *getNSConcreteStackBlock();
+ int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
+ const llvm::Type *getBlockDescriptorType();
+
+ const llvm::Type *getGenericBlockLiteralType();
+ const llvm::Type *getGenericExtendedBlockLiteralType();
+
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+
+ /// NSConcreteGlobalBlock - Cached reference to the class pointer for global
+ /// blocks.
+ llvm::Constant *NSConcreteGlobalBlock;
+
+  /// NSConcreteStackBlock - Cached reference to the class pointer for stack
+ /// blocks.
+ llvm::Constant *NSConcreteStackBlock;
+
+ const llvm::Type *BlockDescriptorType;
+ const llvm::Type *GenericBlockLiteralType;
+ const llvm::Type *GenericExtendedBlockLiteralType;
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ llvm::Value *BlockObjectAssign;
+ llvm::Value *BlockObjectDispose;
+ const llvm::Type *PtrToInt8Ty;
+
+ BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
+ CodeGenTypes &T, CodeGenModule &CodeGen)
+ : Context(C), TheModule(M), TheTargetData(TD), Types(T),
+ CGM(CodeGen),
+ NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0),
+ GenericBlockLiteralType(0), GenericExtendedBlockLiteralType(0),
+ BlockObjectAssign(0), BlockObjectDispose(0) {
+ Block.GlobalUniqueCount = 0;
+ PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ }
+};
+
+class BlockFunction : public BlockBase {
+ CodeGenModule &CGM;
+ CodeGenFunction &CGF;
+ ASTContext &getContext() const;
+
+public:
+ const llvm::Type *PtrToInt8Ty;
+ struct HelperInfo {
+ int index;
+ int flag;
+ bool RequiresCopying;
+ };
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128 /* called from __block (byref) copy/dispose
+ support routines */
+ };
+
+ /// BlockInfo - Information to generate a block literal.
+ struct BlockInfo {
+ /// BlockLiteralTy - The type of the block literal.
+ const llvm::Type *BlockLiteralTy;
+
+ /// Name - the name of the function this block was created for, if any.
+ const char *Name;
+
+ /// ByCopyDeclRefs - Variables from parent scopes that have been imported
+ /// into this block.
+ llvm::SmallVector<const BlockDeclRefExpr *, 8> ByCopyDeclRefs;
+
+  /// ByRefDeclRefs - __block variables from parent scopes that have been
+  /// imported into this block.
+ llvm::SmallVector<const BlockDeclRefExpr *, 8> ByRefDeclRefs;
+
+ BlockInfo(const llvm::Type *blt, const char *n)
+ : BlockLiteralTy(blt), Name(n) {
+ // Skip asm prefix, if any.
+ if (Name && Name[0] == '\01')
+ ++Name;
+ }
+ };
+
+ CGBuilderTy &Builder;
+
+ BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B);
+
+ /// BlockOffset - The offset in bytes for the next allocation of an
+ /// imported block variable.
+ uint64_t BlockOffset;
+ /// BlockAlign - Maximal alignment needed for the Block expressed in bytes.
+ uint64_t BlockAlign;
+
+ /// getBlockOffset - Allocate an offset for the ValueDecl from a
+ /// BlockDeclRefExpr in a block literal (BlockExpr).
+ uint64_t getBlockOffset(const BlockDeclRefExpr *E);
+
+ /// BlockHasCopyDispose - True iff the block uses copy/dispose.
+ bool BlockHasCopyDispose;
+
+  /// BlockDeclRefDecls - Decls from BlockDeclRefExprs in appearance order
+ /// in a block literal. Decls without names are used for padding.
+ llvm::SmallVector<const Expr *, 8> BlockDeclRefDecls;
+
+ /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
+ std::map<const Decl*, uint64_t> BlockDecls;
+
+ ImplicitParamDecl *BlockStructDecl;
+ ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
+
+ llvm::Constant *GenerateCopyHelperFunction(bool, const llvm::StructType *,
+ std::vector<HelperInfo> *);
+ llvm::Constant *GenerateDestroyHelperFunction(bool, const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Constant *BuildCopyHelper(const llvm::StructType *,
+ std::vector<HelperInfo> *);
+ llvm::Constant *BuildDestroyHelper(const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
+ llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);
+
+ llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag);
+ llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag);
+
+ llvm::Value *getBlockObjectAssign();
+ llvm::Value *getBlockObjectDispose();
+ void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);
+
+ bool BlockRequiresCopying(QualType Ty) {
+ if (Ty->isBlockPointerType())
+ return true;
+ if (getContext().isObjCNSObjectType(Ty))
+ return true;
+ if (getContext().isObjCObjectPointerType(Ty))
+ return true;
+ return false;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
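
A source-level illustration, not part of the commit, of the state tracked
above: counter is a byref import (BLOCK_FIELD_IS_BYREF, reached through the
byref struct's forwarding pointer), step is a by-copy import, and copying the
block runs the generated copy helper, which calls _Block_object_assign for
each field whose HelperInfo requires it. Builds with clang -fblocks against a
blocks runtime.

    #include <stdio.h>
    #include <Block.h>

    int main(void) {
      __block int counter = 0;  /* byref: lives in an on-stack byref struct */
      int step = 5;             /* imported by copy into the block literal  */

      void (^bump)(void) = ^{ counter += step; };  /* stack block literal */

      void (^heapBump)(void) = Block_copy(bump);   /* runs __copy_helper_block_ */
      heapBump();
      printf("%d\n", counter);                     /* prints 5 */
      Block_release(heapBump);                     /* runs __destroy_helper_block_ */
      return 0;
    }
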
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
new file mode 100644
index 0000000..ed56bd9
--- /dev/null
+++ b/lib/CodeGen/CGBuilder.h
@@ -0,0 +1,26 @@
+//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBUILDER_H
+#define CLANG_CODEGEN_CGBUILDER_H
+
+#include "llvm/Support/IRBuilder.h"
+
+namespace clang {
+namespace CodeGen {
+ // Don't preserve names on values in an optimized build.
+#ifdef NDEBUG
+ typedef llvm::IRBuilder<false> CGBuilderTy;
+#else
+ typedef llvm::IRBuilder<> CGBuilderTy;
+#endif
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
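
A usage note, not part of the commit: the only observable difference between
the two typedefs is whether the name hint passed to instruction-creating calls
is preserved in the IR. A sketch, assuming an existing basic block BB and
pointer value Ptr:

    CGBuilderTy Builder(BB);                          // insert at the end of BB
    llvm::Value *V = Builder.CreateLoad(Ptr, "tmp");  // "%tmp" with IRBuilder<>,
                                                      // an unnamed "%0" with
                                                      // IRBuilder<false>
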
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
new file mode 100644
index 0000000..d813bba
--- /dev/null
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -0,0 +1,1037 @@
+//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/TargetBuiltins.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID
+/// and the expression node.
+static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
+ Intrinsic::ID Id, const CallExpr *E) {
+ const llvm::Type *ResType[2];
+ ResType[0] = CGF.ConvertType(E->getType());
+ ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
+ return RValue::get(CGF.Builder.CreateCall2(AtomF,
+ CGF.EmitScalarExpr(E->getArg(0)),
+ CGF.EmitScalarExpr(E->getArg(1))));
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID and
+/// the expression node, where the return value is the result of the
+/// operation.
+static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
+ Intrinsic::ID Id, const CallExpr *E,
+ Instruction::BinaryOps Op) {
+ const llvm::Type *ResType[2];
+ ResType[0] = CGF.ConvertType(E->getType());
+ ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
+ Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+ Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
+ Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);
+
+ if (Id == Intrinsic::atomic_load_nand)
+ Result = CGF.Builder.CreateNot(Result);
+
+
+ return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
+}
+
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E) {
+ // See if we can constant fold this builtin. If so, don't emit it at all.
+ Expr::EvalResult Result;
+ if (E->Evaluate(Result, CGM.getContext())) {
+ if (Result.Val.isInt())
+ return RValue::get(llvm::ConstantInt::get(Result.Val.getInt()));
+ else if (Result.Val.isFloat())
+ return RValue::get(llvm::ConstantFP::get(Result.Val.getFloat()));
+ }
+
+ switch (BuiltinID) {
+ default: break; // Handle intrinsics and libm functions below.
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__builtin_va_end: {
+ Value *ArgValue = EmitVAListRef(E->getArg(0));
+ const llvm::Type *DestType =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ if (ArgValue->getType() != DestType)
+ ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+ ArgValue->getNameStart());
+
+ Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+ Intrinsic::vaend : Intrinsic::vastart;
+ return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
+ }
+ case Builtin::BI__builtin_va_copy: {
+ Value *DstPtr = EmitVAListRef(E->getArg(0));
+ Value *SrcPtr = EmitVAListRef(E->getArg(1));
+
+ const llvm::Type *Type =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+
+ DstPtr = Builder.CreateBitCast(DstPtr, Type);
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
+ return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+ DstPtr, SrcPtr));
+ }
+ case Builtin::BI__builtin_abs: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue, Constant::getNullValue(ArgValue->getType()),
+ "abscond");
+ Value *Result =
+ Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
+
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll: {
+ // ffs(x) -> x ? cttz(x) + 1 : 0
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
+ ConstantInt::get(ArgType, 1), "tmp");
+ Value *Zero = llvm::Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll: {
+ // parity(x) -> ctpop(x) & 1
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
+ Value *Result = Builder.CreateAnd(Tmp, ConstantInt::get(ArgType, 1),
+ "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_expect:
+ // FIXME: pass expect through to LLVM
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
+ return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
+ }
+ case Builtin::BI__builtin_object_size: {
+ // FIXME: Implement. For now we just always fail and pretend we
+ // don't know the object size.
+ llvm::APSInt TypeArg = E->getArg(1)->EvaluateAsInt(CGM.getContext());
+ const llvm::Type *ResType = ConvertType(E->getType());
+ // bool UseSubObject = TypeArg.getZExtValue() & 1;
+ bool UseMinimum = TypeArg.getZExtValue() & 2;
+ return RValue::get(ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
+ }
+ case Builtin::BI__builtin_prefetch: {
+ Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
+    // FIXME: Technically these constants should be of type 'int', yes?
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ ConstantInt::get(llvm::Type::Int32Ty, 3);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
+ return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
+ }
+ case Builtin::BI__builtin_trap: {
+ Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
+ return RValue::get(Builder.CreateCall(F));
+ }
+
+ case Builtin::BI__builtin_powi:
+ case Builtin::BI__builtin_powif:
+ case Builtin::BI__builtin_powil: {
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ }
+
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered: {
+ // Ordered comparisons: we know the arguments to these are matching scalar
+ // floating point values.
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unknown ordered comparison");
+ case Builtin::BI__builtin_isgreater:
+ LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isgreaterequal:
+ LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isless:
+ LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessequal:
+ LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessgreater:
+ LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isunordered:
+ LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
+ break;
+ }
+ // ZExt bool to int type.
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
+ "tmp"));
+ }
+ case Builtin::BIalloca:
+ case Builtin::BI__builtin_alloca: {
+    // FIXME: LLVM IR should allow alloca with an i64 size!
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ Size = Builder.CreateIntCast(Size, llvm::Type::Int32Ty, false, "tmp");
+ return RValue::get(Builder.CreateAlloca(llvm::Type::Int8Ty, Size, "tmp"));
+ }
+ case Builtin::BI__builtin_bzero: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall4(CGM.getMemSetFn(), Address,
+ llvm::ConstantInt::get(llvm::Type::Int8Ty, 0),
+ EmitScalarExpr(E->getArg(1)),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_memcpy: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall4(CGM.getMemCpyFn(), Address,
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_memmove: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall4(CGM.getMemMoveFn(), Address,
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_memset: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall4(CGM.getMemSetFn(), Address,
+ Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ llvm::Type::Int8Ty),
+ EmitScalarExpr(E->getArg(2)),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_return_address: {
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
+ return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
+ }
+ case Builtin::BI__builtin_frame_address: {
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
+ }
+ case Builtin::BI__builtin_extract_return_addr: {
+ // FIXME: There should be a target hook for this
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ }
+ case Builtin::BI__builtin_unwind_init: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
+ return RValue::get(Builder.CreateCall(F));
+ }
+#if 0
+ // FIXME: Finish/enable when LLVM backend support stabilizes
+ case Builtin::BI__builtin_setjmp: {
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ // Store the frame pointer to the buffer
+ Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ Value *FrameAddr =
+ Builder.CreateCall(FrameAddrF,
+ Constant::getNullValue(llvm::Type::Int32Ty));
+ Builder.CreateStore(FrameAddr, Buf);
+ // Call the setjmp intrinsic
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
+ const llvm::Type *DestType =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Buf = Builder.CreateBitCast(Buf, DestType);
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+ case Builtin::BI__builtin_longjmp: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *DestType =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Buf = Builder.CreateBitCast(Buf, DestType);
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+#endif
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ assert(0 && "Shouldn't make it through sema");
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+ case Builtin::BI__sync_fetch_and_max:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+ case Builtin::BI__sync_fetch_and_umin:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+ case Builtin::BI__sync_fetch_and_umax:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+
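+  // The *_and_fetch forms return the new value, but the atomic intrinsics
+  // only return the old one, so EmitBinaryAtomicPost re-applies the operation
+  // (the trailing llvm::Instruction opcode) to the intrinsic's result, e.g.
+  //   __sync_add_and_fetch(p, v)  =>  %old = atomic.load.add(p, v); %old + v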
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+ llvm::Instruction::Add);
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
+ llvm::Instruction::Sub);
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
+ llvm::Instruction::And);
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
+ llvm::Instruction::Or);
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
+ llvm::Instruction::Xor);
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
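+    // FIXME: Re-applying And yields old & value here, but
+    // __sync_nand_and_fetch should return ~(old & value).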
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
+ llvm::Instruction::And);
+
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ {
+ const llvm::Type *ResType[2];
+ ResType[0]= ConvertType(E->getType());
+ ResType[1] = ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
+ return RValue::get(Builder.CreateCall3(AtomF,
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2))));
+ }
+
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ {
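+    // Lower to the val form plus a compare: the intrinsic returns the old
+    // value, and the builtin's boolean result is (old == expected), e.g.
+    //   %prev = atomic.cmp.swap(ptr, oldval, newval)
+    //   %ok   = icmp eq %prev, %oldval   ; then zext to int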
+ const llvm::Type *ResType[2];
+ ResType[0]= ConvertType(E->getArg(1)->getType());
+ ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
+ Value *OldVal = EmitScalarExpr(E->getArg(1));
+ Value *PrevVal = Builder.CreateCall3(AtomF,
+ EmitScalarExpr(E->getArg(0)),
+ OldVal,
+ EmitScalarExpr(E->getArg(2)));
+ Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
+ // zext bool to int.
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ElTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr, true);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__sync_synchronize: {
+ Value *C[5];
+ C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::Int1Ty, 1);
+ C[4] = ConstantInt::get(llvm::Type::Int1Ty, 0);
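+    // llvm.memory.barrier takes five i1 flags: load-load, load-store,
+    // store-load, store-store, and whether the barrier is device visible.
+    // __sync_synchronize is a full barrier, so all four orderings are set.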
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+ return RValue::get(0);
+ }
+
+ // Library functions with special handling.
+ case Builtin::BIsqrt:
+ case Builtin::BIsqrtf:
+ case Builtin::BIsqrtl: {
+ // Rewrite sqrt to intrinsic if allowed.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ArgType = Arg0->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
+ return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
+ }
+
+ case Builtin::BIpow:
+ case Builtin::BIpowf:
+ case Builtin::BIpowl: {
+    // Rewrite pow to intrinsic if allowed.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ }
+ }
+
+  // If this is an alias for a libm function (e.g. __builtin_sin), turn it into
+ // that function.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
+ getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return EmitCall(CGM.getBuiltinLibFunction(BuiltinID),
+ E->getCallee()->getType(), E->arg_begin(),
+ E->arg_end());
+
+ // See if we have a target specific intrinsic.
+ const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
+ Intrinsic::ID IntrinsicID =
+ Intrinsic::getIntrinsicForGCCBuiltin(Target.getTargetPrefix(), Name);
+
+ if (IntrinsicID != Intrinsic::not_intrinsic) {
+ SmallVector<Value*, 16> Args;
+
+ Function *F = CGM.getIntrinsic(IntrinsicID);
+ const llvm::FunctionType *FTy = F->getFunctionType();
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ Value *ArgValue = EmitScalarExpr(E->getArg(i));
+
+      // If the intrinsic arg type is different from the builtin arg type,
+ // we need to do a bit cast.
+ const llvm::Type *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType()) {
+        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
+ "Must be able to losslessly bit cast to param");
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ }
+
+ Args.push_back(ArgValue);
+ }
+
+ Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
+ QualType BuiltinRetType = E->getType();
+
+ const llvm::Type *RetTy = llvm::Type::VoidTy;
+ if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
+
+ if (RetTy != V->getType()) {
+ assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
+ "Must be able to losslessly bit cast result type");
+ V = Builder.CreateBitCast(V, RetTy);
+ }
+
+ return RValue::get(V);
+ }
+
+ // See if we have a target specific builtin that needs to be lowered.
+ if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
+ return RValue::get(V);
+
+ ErrorUnsupported(E, "builtin function");
+
+  // Unknown builtin: we have already reported an error, so just return an
+  // undef value (or an undef aggregate temporary) of the expected type so
+  // that codegen can continue.
+ if (hasAggregateLLVMType(E->getType()))
+ return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
+ return RValue::get(UndefValue::get(ConvertType(E->getType())));
+}
+
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ const char *TargetPrefix = Target.getTargetPrefix();
+ if (strcmp(TargetPrefix, "x86") == 0)
+ return EmitX86BuiltinExpr(BuiltinID, E);
+ else if (strcmp(TargetPrefix, "ppc") == 0)
+ return EmitPPCBuiltinExpr(BuiltinID, E);
+ return 0;
+}
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ switch (BuiltinID) {
+ default: return 0;
+ case X86::BI__builtin_ia32_mulps:
+ return Builder.CreateMul(Ops[0], Ops[1], "mulps");
+ case X86::BI__builtin_ia32_mulpd:
+ return Builder.CreateMul(Ops[0], Ops[1], "mulpd");
+ case X86::BI__builtin_ia32_pand:
+ case X86::BI__builtin_ia32_pand128:
+ return Builder.CreateAnd(Ops[0], Ops[1], "pand");
+ case X86::BI__builtin_ia32_por:
+ case X86::BI__builtin_ia32_por128:
+ return Builder.CreateOr(Ops[0], Ops[1], "por");
+ case X86::BI__builtin_ia32_pxor:
+ case X86::BI__builtin_ia32_pxor128:
+ return Builder.CreateXor(Ops[0], Ops[1], "pxor");
+ case X86::BI__builtin_ia32_pandn:
+ case X86::BI__builtin_ia32_pandn128:
+ Ops[0] = Builder.CreateNot(Ops[0], "tmp");
+ return Builder.CreateAnd(Ops[0], Ops[1], "pandn");
+ case X86::BI__builtin_ia32_paddb:
+ case X86::BI__builtin_ia32_paddb128:
+ case X86::BI__builtin_ia32_paddd:
+ case X86::BI__builtin_ia32_paddd128:
+ case X86::BI__builtin_ia32_paddq:
+ case X86::BI__builtin_ia32_paddq128:
+ case X86::BI__builtin_ia32_paddw:
+ case X86::BI__builtin_ia32_paddw128:
+ case X86::BI__builtin_ia32_addps:
+ case X86::BI__builtin_ia32_addpd:
+ return Builder.CreateAdd(Ops[0], Ops[1], "add");
+ case X86::BI__builtin_ia32_psubb:
+ case X86::BI__builtin_ia32_psubb128:
+ case X86::BI__builtin_ia32_psubd:
+ case X86::BI__builtin_ia32_psubd128:
+ case X86::BI__builtin_ia32_psubq:
+ case X86::BI__builtin_ia32_psubq128:
+ case X86::BI__builtin_ia32_psubw:
+ case X86::BI__builtin_ia32_psubw128:
+ case X86::BI__builtin_ia32_subps:
+ case X86::BI__builtin_ia32_subpd:
+ return Builder.CreateSub(Ops[0], Ops[1], "sub");
+ case X86::BI__builtin_ia32_divps:
+ return Builder.CreateFDiv(Ops[0], Ops[1], "divps");
+ case X86::BI__builtin_ia32_divpd:
+ return Builder.CreateFDiv(Ops[0], Ops[1], "divpd");
+ case X86::BI__builtin_ia32_pmullw:
+ case X86::BI__builtin_ia32_pmullw128:
+ return Builder.CreateMul(Ops[0], Ops[1], "pmul");
+ case X86::BI__builtin_ia32_punpckhbw:
+ return EmitShuffleVector(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15,
+ "punpckhbw");
+ case X86::BI__builtin_ia32_punpckhbw128:
+ return EmitShuffleVector(Ops[0], Ops[1], 8, 24, 9, 25, 10, 26, 11, 27,
+ 12, 28, 13, 29, 14, 30, 15, 31,
+ "punpckhbw");
+ case X86::BI__builtin_ia32_punpckhwd:
+ return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "punpckhwd");
+ case X86::BI__builtin_ia32_punpckhwd128:
+ return EmitShuffleVector(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15,
+ "punpckhwd");
+ case X86::BI__builtin_ia32_punpckhdq:
+ return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "punpckhdq");
+ case X86::BI__builtin_ia32_punpckhdq128:
+ return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "punpckhdq");
+ case X86::BI__builtin_ia32_punpckhqdq128:
+ return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "punpckhqdq");
+ case X86::BI__builtin_ia32_punpcklbw:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11,
+ "punpcklbw");
+ case X86::BI__builtin_ia32_punpcklwd:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "punpcklwd");
+ case X86::BI__builtin_ia32_punpckldq:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "punpckldq");
+ case X86::BI__builtin_ia32_punpckldq128:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "punpckldq");
+ case X86::BI__builtin_ia32_punpcklqdq128:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "punpcklqdq");
+ case X86::BI__builtin_ia32_pslldi128:
+ case X86::BI__builtin_ia32_psllqi128:
+ case X86::BI__builtin_ia32_psllwi128:
+ case X86::BI__builtin_ia32_psradi128:
+ case X86::BI__builtin_ia32_psrawi128:
+ case X86::BI__builtin_ia32_psrldi128:
+ case X86::BI__builtin_ia32_psrlqi128:
+ case X86::BI__builtin_ia32_psrlwi128: {
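+    // Lower the shift-by-immediate builtins to the shift-by-XMM intrinsics:
+    // widen the count to i64, insert it into element 0 of a <2 x i64>, and
+    // bitcast that to the operand's vector type.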
+ Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
+ Ops[1], Zero, "insert");
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported shift intrinsic!");
+ case X86::BI__builtin_ia32_pslldi128:
+ name = "pslldi";
+ ID = Intrinsic::x86_sse2_psll_d;
+ break;
+ case X86::BI__builtin_ia32_psllqi128:
+ name = "psllqi";
+ ID = Intrinsic::x86_sse2_psll_q;
+ break;
+ case X86::BI__builtin_ia32_psllwi128:
+ name = "psllwi";
+ ID = Intrinsic::x86_sse2_psll_w;
+ break;
+ case X86::BI__builtin_ia32_psradi128:
+ name = "psradi";
+ ID = Intrinsic::x86_sse2_psra_d;
+ break;
+ case X86::BI__builtin_ia32_psrawi128:
+ name = "psrawi";
+ ID = Intrinsic::x86_sse2_psra_w;
+ break;
+ case X86::BI__builtin_ia32_psrldi128:
+ name = "psrldi";
+ ID = Intrinsic::x86_sse2_psrl_d;
+ break;
+ case X86::BI__builtin_ia32_psrlqi128:
+ name = "psrlqi";
+ ID = Intrinsic::x86_sse2_psrl_q;
+ break;
+ case X86::BI__builtin_ia32_psrlwi128:
+ name = "psrlwi";
+ ID = Intrinsic::x86_sse2_psrl_w;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
+ case X86::BI__builtin_ia32_pslldi:
+ case X86::BI__builtin_ia32_psllqi:
+ case X86::BI__builtin_ia32_psllwi:
+ case X86::BI__builtin_ia32_psradi:
+ case X86::BI__builtin_ia32_psrawi:
+ case X86::BI__builtin_ia32_psrldi:
+ case X86::BI__builtin_ia32_psrlqi:
+ case X86::BI__builtin_ia32_psrlwi: {
+ Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported shift intrinsic!");
+ case X86::BI__builtin_ia32_pslldi:
+ name = "pslldi";
+ ID = Intrinsic::x86_mmx_psll_d;
+ break;
+ case X86::BI__builtin_ia32_psllqi:
+ name = "psllqi";
+ ID = Intrinsic::x86_mmx_psll_q;
+ break;
+ case X86::BI__builtin_ia32_psllwi:
+ name = "psllwi";
+ ID = Intrinsic::x86_mmx_psll_w;
+ break;
+ case X86::BI__builtin_ia32_psradi:
+ name = "psradi";
+ ID = Intrinsic::x86_mmx_psra_d;
+ break;
+ case X86::BI__builtin_ia32_psrawi:
+ name = "psrawi";
+ ID = Intrinsic::x86_mmx_psra_w;
+ break;
+ case X86::BI__builtin_ia32_psrldi:
+ name = "psrldi";
+ ID = Intrinsic::x86_mmx_psrl_d;
+ break;
+ case X86::BI__builtin_ia32_psrlqi:
+ name = "psrlqi";
+ ID = Intrinsic::x86_mmx_psrl_q;
+ break;
+ case X86::BI__builtin_ia32_psrlwi:
+ name = "psrlwi";
+ ID = Intrinsic::x86_mmx_psrl_w;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
+ case X86::BI__builtin_ia32_pshufw: {
+ unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[0],
+ i & 0x3, (i & 0xc) >> 2,
+ (i & 0x30) >> 4, (i & 0xc0) >> 6,
+ "pshufw");
+ }
+ case X86::BI__builtin_ia32_pshuflw: {
+ unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[0],
+ i & 0x3, (i & 0xc) >> 2,
+ (i & 0x30) >> 4, (i & 0xc0) >> 6, 4, 5, 6, 7,
+ "pshuflw");
+ }
+ case X86::BI__builtin_ia32_pshufhw: {
+ unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[0], 0, 1, 2, 3,
+ 4 + (i & 0x3), 4 + ((i & 0xc) >> 2),
+ 4 + ((i & 0x30) >> 4), 4 + ((i & 0xc0) >> 6),
+ "pshufhw");
+ }
+ case X86::BI__builtin_ia32_pshufd: {
+ unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
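+    // The immediate packs four 2-bit element selectors; e.g. 0x1B
+    // (0b00011011) selects elements 3,2,1,0, reversing the vector.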
+ return EmitShuffleVector(Ops[0], Ops[0],
+ i & 0x3, (i & 0xc) >> 2,
+ (i & 0x30) >> 4, (i & 0xc0) >> 6,
+ "pshufd");
+ }
+ case X86::BI__builtin_ia32_vec_init_v4hi:
+ case X86::BI__builtin_ia32_vec_init_v8qi:
+ case X86::BI__builtin_ia32_vec_init_v2si:
+ return EmitVector(&Ops[0], Ops.size());
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v4hi:
+ case X86::BI__builtin_ia32_vec_ext_v2df:
+ return Builder.CreateExtractElement(Ops[0], Ops[1], "result");
+ case X86::BI__builtin_ia32_cmpps: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
+ }
+ case X86::BI__builtin_ia32_cmpss: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
+ }
+ case X86::BI__builtin_ia32_ldmxcsr: {
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
+ Builder.CreateStore(Ops[0], Tmp);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ }
+ case X86::BI__builtin_ia32_stmxcsr: {
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
+ One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ return Builder.CreateLoad(Tmp, "stmxcsr");
+ }
+ case X86::BI__builtin_ia32_cmppd: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
+ }
+ case X86::BI__builtin_ia32_cmpsd: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
+ }
+ case X86::BI__builtin_ia32_movss:
+ return EmitShuffleVector(Ops[0], Ops[1], 4, 1, 2, 3, "movss");
+ case X86::BI__builtin_ia32_shufps: {
+ unsigned i = cast<ConstantInt>(Ops[2])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[1],
+ i & 0x3, (i & 0xc) >> 2,
+ ((i & 0x30) >> 4) + 4,
+ ((i & 0xc0) >> 6) + 4, "shufps");
+ }
+ case X86::BI__builtin_ia32_shufpd: {
+ unsigned i = cast<ConstantInt>(Ops[2])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[1], i & 1,
+ ((i & 2) >> 1)+2, "shufpd");
+ }
+ case X86::BI__builtin_ia32_punpcklbw128:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 16, 1, 17, 2, 18, 3, 19,
+ 4, 20, 5, 21, 6, 22, 7, 23,
+ "punpcklbw");
+ case X86::BI__builtin_ia32_punpcklwd128:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11,
+ "punpcklwd");
+ case X86::BI__builtin_ia32_movlhps:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 1, 4, 5, "movlhps");
+ case X86::BI__builtin_ia32_movhlps:
+ return EmitShuffleVector(Ops[0], Ops[1], 6, 7, 2, 3, "movhlps");
+ case X86::BI__builtin_ia32_unpckhps:
+ return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "unpckhps");
+ case X86::BI__builtin_ia32_unpcklps:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "unpcklps");
+ case X86::BI__builtin_ia32_unpckhpd:
+ return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "unpckhpd");
+ case X86::BI__builtin_ia32_unpcklpd:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "unpcklpd");
+ case X86::BI__builtin_ia32_movsd:
+ return EmitShuffleVector(Ops[0], Ops[1], 2, 1, "movsd");
+ case X86::BI__builtin_ia32_loadlps:
+ case X86::BI__builtin_ia32_loadhps: {
+ // FIXME: This should probably be represented as
+ // shuffle (dst, (v4f32 (insert undef, (load i64), 0)), shuf mask hi/lo)
+ const llvm::Type *EltTy = llvm::Type::DoubleTy;
+ const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+ const llvm::Type *OrigTy = Ops[0]->getType();
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_loadlps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
+ Ops[1] = Builder.CreateBitCast(Ops[1], llvm::PointerType::getUnqual(EltTy));
+ Ops[1] = Builder.CreateLoad(Ops[1], "tmp");
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[0] = Builder.CreateInsertElement(Ops[0], Ops[1], Idx, "loadps");
+ return Builder.CreateBitCast(Ops[0], OrigTy, "loadps");
+ }
+ case X86::BI__builtin_ia32_loadlpd:
+ case X86::BI__builtin_ia32_loadhpd: {
+ Ops[1] = Builder.CreateLoad(Ops[1], "tmp");
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_loadlpd ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Idx, "loadpd");
+ }
+ case X86::BI__builtin_ia32_storehps:
+ case X86::BI__builtin_ia32_storelps: {
+ const llvm::Type *EltTy = llvm::Type::Int64Ty;
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
+ llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+
+    // cast val to v2i64
+ Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+
+    // extract element 0 (storelps) or element 1 (storehps)
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+
+ // cast pointer to i64 & store
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case X86::BI__builtin_ia32_loadlv4si: {
+ // load i64
+ const llvm::Type *EltTy = llvm::Type::Int64Ty;
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ Ops[0] = Builder.CreateLoad(Ops[0], "load");
+
+ // scalar to vector: insert i64 into 2 x i64 undef
+ llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Ops[0] = Builder.CreateInsertElement(llvm::UndefValue::get(VecTy),
+ Ops[0], Zero, "s2v");
+
+ // shuffle into zero vector.
+ std::vector<llvm::Constant *>Elts;
+ Elts.resize(2, llvm::ConstantInt::get(EltTy, 0));
+ llvm::Value *ZV = ConstantVector::get(Elts);
+ Ops[0] = EmitShuffleVector(ZV, Ops[0], 2, 1, "loadl");
+
+ // bitcast to result.
+ return Builder.CreateBitCast(Ops[0],
+ llvm::VectorType::get(llvm::Type::Int32Ty, 4));
+ }
+ case X86::BI__builtin_ia32_vec_set_v4hi:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrw");
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrd");
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrq");
+ case X86::BI__builtin_ia32_andps:
+ case X86::BI__builtin_ia32_andpd:
+ case X86::BI__builtin_ia32_andnps:
+ case X86::BI__builtin_ia32_andnpd:
+ case X86::BI__builtin_ia32_orps:
+ case X86::BI__builtin_ia32_orpd:
+ case X86::BI__builtin_ia32_xorpd:
+ case X86::BI__builtin_ia32_xorps: {
+ const llvm::Type *ITy = llvm::VectorType::get(llvm::Type::Int32Ty, 4);
+ const llvm::Type *FTy = Ops[0]->getType();
+ Ops[0] = Builder.CreateBitCast(Ops[0], ITy, "bitcast");
+ Ops[1] = Builder.CreateBitCast(Ops[1], ITy, "bitcast");
+ switch (BuiltinID) {
+ case X86::BI__builtin_ia32_andps:
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andps");
+ break;
+ case X86::BI__builtin_ia32_andpd:
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andpd");
+ break;
+ case X86::BI__builtin_ia32_andnps:
+ Ops[0] = Builder.CreateNot(Ops[0], "not");
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andnps");
+ break;
+ case X86::BI__builtin_ia32_andnpd:
+ Ops[0] = Builder.CreateNot(Ops[0], "not");
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andnpd");
+ break;
+ case X86::BI__builtin_ia32_orps:
+ Ops[0] = Builder.CreateOr(Ops[0], Ops[1], "orps");
+ break;
+ case X86::BI__builtin_ia32_orpd:
+ Ops[0] = Builder.CreateOr(Ops[0], Ops[1], "orpd");
+ break;
+ case X86::BI__builtin_ia32_xorps:
+ Ops[0] = Builder.CreateXor(Ops[0], Ops[1], "xorps");
+ break;
+ case X86::BI__builtin_ia32_xorpd:
+ Ops[0] = Builder.CreateXor(Ops[0], Ops[1], "xorpd");
+ break;
+ }
+ return Builder.CreateBitCast(Ops[0], FTy, "bitcast");
+ }
+ }
+}
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ default: return 0;
+ }
+}
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
new file mode 100644
index 0000000..731e38c
--- /dev/null
+++ b/lib/CodeGen/CGCXX.cpp
@@ -0,0 +1,454 @@
+//===--- CGCXX.cpp - Emit LLVM Code for C++ code generation ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy.
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace CodeGen;
+
+void
+CodeGenFunction::GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ // FIXME: This should use __cxa_guard_{acquire,release}?
+
+ assert(!getContext().getLangOptions().ThreadsafeStatics &&
+ "thread safe statics are currently not supported!");
+
+ llvm::SmallString<256> GuardVName;
+ llvm::raw_svector_ostream GuardVOut(GuardVName);
+ mangleGuardVariable(&D, getContext(), GuardVOut);
+
+ // Create the guard variable.
+ llvm::GlobalValue *GuardV =
+ new llvm::GlobalVariable(llvm::Type::Int64Ty, false,
+ GV->getLinkage(),
+ llvm::Constant::getNullValue(llvm::Type::Int64Ty),
+ GuardVName.c_str(),
+ &CGM.getModule());
+
+ // Load the first byte of the guard variable.
+ const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::Int8Ty, 0);
+ llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
+ "tmp");
+
+ // Compare it against 0.
+ llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::Int8Ty);
+  llvm::Value *ICmp = Builder.CreateICmpEQ(V, nullValue, "tobool");
+
+ llvm::BasicBlock *InitBlock = createBasicBlock("init");
+ llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
+
+ // If the guard variable is 0, jump to the initializer code.
+ Builder.CreateCondBr(ICmp, InitBlock, EndBlock);
+
+ EmitBlock(InitBlock);
+
+ const Expr *Init = D.getInit();
+ if (!hasAggregateLLVMType(Init->getType())) {
+ llvm::Value *V = EmitScalarExpr(Init);
+ Builder.CreateStore(V, GV, D.getType().isVolatileQualified());
+ } else if (Init->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, GV, D.getType().isVolatileQualified());
+ } else {
+ EmitAggExpr(Init, GV, D.getType().isVolatileQualified());
+ }
+
+ Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::Int8Ty, 1),
+ Builder.CreateBitCast(GuardV, PtrTy));
+
+ EmitBlock(EndBlock);
+}
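+
+// For example (names illustrative), given
+//   void f() { static Widget w(init()); }
+// the emitted code loads the first byte of the mangled guard variable,
+// branches to the init block only if that byte is still 0, and stores 1
+// to the guard once the initializer has run.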
+
+RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This),
+ MD->getThisType(getContext())));
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+
+ QualType ResultType = MD->getType()->getAsFunctionType()->getResultType();
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
+ Callee, Args, MD);
+}
+
+RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE) {
+ const MemberExpr *ME = cast<MemberExpr>(CE->getCallee());
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+ const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
+
+ llvm::Value *This;
+
+ if (ME->isArrow())
+ This = EmitScalarExpr(ME->getBase());
+ else {
+ LValue BaseLV = EmitLValue(ME->getBase());
+ This = BaseLV.getAddress();
+ }
+
+ return EmitCXXMemberCall(MD, Callee, This,
+ CE->arg_begin(), CE->arg_end());
+}
+
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
+
+ llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+
+ return EmitCXXMemberCall(MD, Callee, This,
+ E->arg_begin() + 1, E->arg_end());
+}
+
+llvm::Value *CodeGenFunction::LoadCXXThis() {
+ assert(isa<CXXMethodDecl>(CurFuncDecl) &&
+ "Must be in a C++ member function decl to load 'this'");
+ assert(cast<CXXMethodDecl>(CurFuncDecl)->isInstance() &&
+ "Must be in a C++ member function decl to load 'this'");
+
+ // FIXME: What if we're inside a block?
+ // ans: See how CodeGenFunction::LoadObjCSelf() uses
+ // CodeGenFunction::BlockForwardSelf() for how to do this.
+ return Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
+}
+
+void
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, This, ArgBeg, ArgEnd);
+}
+
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ llvm::Value *This) {
+ llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, This, 0, 0);
+}
+
+void
+CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
+ const CXXConstructExpr *E) {
+ assert(Dest && "Must have a destination!");
+
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(E->getType()->getAsRecordType()->getDecl());
+ if (RD->hasTrivialConstructor())
+ return;
+
+ // Call the constructor.
+ EmitCXXConstructorCall(E->getConstructor(), Ctor_Complete, Dest,
+ E->arg_begin(), E->arg_end());
+}
+
+void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
+ llvm::Value *Ptr) {
+ LiveTemporaries.push_back(Temporary);
+
+ // Make a cleanup scope and emit the destructor.
+ {
+ CleanupScope Scope(*this);
+
+ EmitCXXDestructorCall(Temporary->getDestructor(), Dtor_Complete, Ptr);
+ }
+}
+
+RValue
+CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+ llvm::Value *AggLoc,
+ bool isAggLocVolatile) {
+ // Keep track of the current cleanup stack depth.
+ size_t CleanupStackDepth = CleanupEntries.size();
+
+ unsigned OldNumLiveTemporaries = LiveTemporaries.size();
+
+ RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, isAggLocVolatile);
+
+ // Go through the temporaries backwards.
+ for (unsigned i = E->getNumTemporaries(); i != 0; --i) {
+ assert(LiveTemporaries.back() == E->getTemporary(i - 1));
+ LiveTemporaries.pop_back();
+ }
+
+ assert(OldNumLiveTemporaries == LiveTemporaries.size() &&
+ "Live temporary stack mismatch!");
+
+ EmitCleanupBlocks(CleanupStackDepth);
+
+ return RV;
+}
+
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+ if (E->isArray()) {
+ ErrorUnsupported(E, "new[] expression");
+ return llvm::UndefValue::get(ConvertType(E->getType()));
+ }
+
+ QualType AllocType = E->getAllocatedType();
+ FunctionDecl *NewFD = E->getOperatorNew();
+ const FunctionProtoType *NewFTy = NewFD->getType()->getAsFunctionProtoType();
+
+ CallArgList NewArgs;
+
+ // The allocation size is the first argument.
+ QualType SizeTy = getContext().getSizeType();
+ llvm::Value *AllocSize =
+ llvm::ConstantInt::get(ConvertType(SizeTy),
+ getContext().getTypeSize(AllocType) / 8);
+
+ NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
+
+ // Emit the rest of the arguments.
+ // FIXME: Ideally, this should just use EmitCallArgs.
+ CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
+
+ // First, use the types from the function type.
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
+ QualType ArgType = NewFTy->getArgType(i);
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
+ NewArg != NewArgEnd; ++NewArg) {
+ QualType ArgType = NewArg->getType();
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+ }
+
+ // Emit the call to new.
+ RValue RV =
+ EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
+ CGM.GetAddrOfFunction(GlobalDecl(NewFD)),
+ NewArgs, NewFD);
+
+  // If an allocation function is declared with an empty exception
+  // specification, it returns null to indicate failure to allocate
+  // storage. [expr.new]p13.
+ // (We don't need to check for null when there's no new initializer and
+ // we're allocating a POD type).
+ bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
+ !(AllocType->isPODType() && !E->hasInitializer());
+
+ llvm::BasicBlock *NewNull = 0;
+ llvm::BasicBlock *NewNotNull = 0;
+ llvm::BasicBlock *NewEnd = 0;
+
+ llvm::Value *NewPtr = RV.getScalarVal();
+
+ if (NullCheckResult) {
+ NewNull = createBasicBlock("new.null");
+ NewNotNull = createBasicBlock("new.notnull");
+ NewEnd = createBasicBlock("new.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(NewPtr,
+ llvm::Constant::getNullValue(NewPtr->getType()),
+ "isnull");
+
+ Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
+ EmitBlock(NewNotNull);
+ }
+
+ NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
+
+ if (AllocType->isPODType()) {
+ if (E->getNumConstructorArgs() > 0) {
+ assert(E->getNumConstructorArgs() == 1 &&
+ "Can only have one argument to initializer of POD type.");
+
+ const Expr *Init = E->getConstructorArg(0);
+
+ if (!hasAggregateLLVMType(AllocType))
+ Builder.CreateStore(EmitScalarExpr(Init), NewPtr);
+ else if (AllocType->isAnyComplexType())
+ EmitComplexExprIntoAddr(Init, NewPtr, AllocType.isVolatileQualified());
+ else
+ EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+ }
+ } else {
+ // Call the constructor.
+ CXXConstructorDecl *Ctor = E->getConstructor();
+
+ EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
+ E->constructor_arg_begin(),
+ E->constructor_arg_end());
+ }
+
+ if (NullCheckResult) {
+ Builder.CreateBr(NewEnd);
+ EmitBlock(NewNull);
+ Builder.CreateBr(NewEnd);
+ EmitBlock(NewEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(NewPtr, NewNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);
+
+ NewPtr = PHI;
+ }
+
+ return NewPtr;
+}
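+
+// For example, 'new int(5)' lowers to a call to operator new(sizeof(int)),
+// a bitcast of the returned pointer to i32*, and a store of 5; the null
+// check above is only emitted when operator new is declared 'throw()', per
+// [expr.new]p13.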
+
+static bool canGenerateCXXstructor(const CXXRecordDecl *RD,
+ ASTContext &Context) {
+ // The class has base classes - we don't support that right now.
+ if (RD->getNumBases() > 0)
+ return false;
+
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(Context),
+ E = RD->field_end(Context); I != E; ++I) {
+ // We don't support ctors for fields that aren't POD.
+ if (!I->getType()->isPODType())
+ return false;
+ }
+
+ return true;
+}
+
+void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ if (!canGenerateCXXstructor(D->getParent(), getContext())) {
+ ErrorUnsupported(D, "C++ constructor", true);
+ return;
+ }
+
+ EmitGlobal(GlobalDecl(D, Ctor_Complete));
+ EmitGlobal(GlobalDecl(D, Ctor_Base));
+}
+
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+
+ llvm::Function *Fn = GetAddrOfCXXConstructor(D, Type);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+llvm::Function *
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false);
+
+ const char *Name = getMangledCXXCtorName(D, Type);
+ return cast<llvm::Function>(
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
+}
+
+const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ llvm::SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ mangleCXXCtor(D, Type, Context, Out);
+
+ Name += '\0';
+ return UniqueMangledName(Name.begin(), Name.end());
+}
+
+void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ if (!canGenerateCXXstructor(D->getParent(), getContext())) {
+ ErrorUnsupported(D, "C++ destructor", true);
+ return;
+ }
+
+ EmitCXXDestructor(D, Dtor_Complete);
+ EmitCXXDestructor(D, Dtor_Base);
+}
+
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ llvm::Function *Fn = GetAddrOfCXXDestructor(D, Type);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+llvm::Function *
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false);
+
+ const char *Name = getMangledCXXDtorName(D, Type);
+ return cast<llvm::Function>(
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
+}
+
+const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ llvm::SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ mangleCXXDtor(D, Type, Context, Out);
+
+ Name += '\0';
+ return UniqueMangledName(Name.begin(), Name.end());
+}
diff --git a/lib/CodeGen/CGCXX.h b/lib/CodeGen/CGCXX.h
new file mode 100644
index 0000000..6051d91
--- /dev/null
+++ b/lib/CodeGen/CGCXX.h
@@ -0,0 +1,36 @@
+//===----- CGCXX.h - C++ related code CodeGen declarations ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the enumerations used to distinguish the C++ constructor
+// and destructor variants produced during code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCXX_H
+#define CLANG_CODEGEN_CGCXX_H
+
+namespace clang {
+
+/// CXXCtorType - C++ constructor types
+enum CXXCtorType {
+ Ctor_Complete, // Complete object ctor
+ Ctor_Base, // Base object ctor
+ Ctor_CompleteAllocating // Complete object allocating ctor
+};
+
+/// CXXDtorType - C++ destructor types
+enum CXXDtorType {
+ Dtor_Deleting, // Deleting dtor
+ Dtor_Complete, // Complete object dtor
+ Dtor_Base // Base object dtor
+};
+
+} // end namespace clang
+
+#endif // CLANG_CODEGEN_CGCXX_H
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
new file mode 100644
index 0000000..ea0b887
--- /dev/null
+++ b/lib/CodeGen/CGCall.cpp
@@ -0,0 +1,2196 @@
+//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCall.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Attributes.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetData.h"
+
+#include "ABIInfo.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/***/
+
+// FIXME: Use iterator and sidestep silly type array creation.
+
+const
+CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
+ return getFunctionInfo(FTNP->getResultType(),
+ llvm::SmallVector<QualType, 16>());
+}
+
+const
+CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
+ llvm::SmallVector<QualType, 16> ArgTys;
+ // FIXME: Kill copy.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+ return getFunctionInfo(FTP->getResultType(), ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
+ llvm::SmallVector<QualType, 16> ArgTys;
+ // Add the 'this' pointer unless this is a static method.
+ if (MD->isInstance())
+ ArgTys.push_back(MD->getThisType(Context));
+
+ const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+ return getFunctionInfo(FTP->getResultType(), ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance())
+ return getFunctionInfo(MD);
+
+ const FunctionType *FTy = FD->getType()->getAsFunctionType();
+ if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
+ return getFunctionInfo(FTP);
+ return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
+ llvm::SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(MD->getSelfDecl()->getType());
+ ArgTys.push_back(Context.getObjCSelType());
+ // FIXME: Kill copy?
+ for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
+ e = MD->param_end(); i != e; ++i)
+ ArgTys.push_back((*i)->getType());
+ return getFunctionInfo(MD->getResultType(), ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const CallArgList &Args) {
+ // FIXME: Kill copy.
+ llvm::SmallVector<QualType, 16> ArgTys;
+ for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i)
+ ArgTys.push_back(i->second);
+ return getFunctionInfo(ResTy, ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args) {
+ // FIXME: Kill copy.
+ llvm::SmallVector<QualType, 16> ArgTys;
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i)
+ ArgTys.push_back(i->second);
+ return getFunctionInfo(ResTy, ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const llvm::SmallVector<QualType, 16> &ArgTys) {
+ // Lookup or create unique function info.
+ llvm::FoldingSetNodeID ID;
+ CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());
+
+ void *InsertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
+ if (FI)
+ return *FI;
+
+ // Construct the function info.
+ FI = new CGFunctionInfo(ResTy, ArgTys);
+ FunctionInfos.InsertNode(FI, InsertPos);
+
+ // Compute ABI information.
+ getABIInfo().computeInfo(*FI, getContext());
+
+ return *FI;
+}
+
+/***/
+
+ABIInfo::~ABIInfo() {}
+
+void ABIArgInfo::dump() const {
+ fprintf(stderr, "(ABIArgInfo Kind=");
+ switch (TheKind) {
+ case Direct:
+ fprintf(stderr, "Direct");
+ break;
+ case Ignore:
+ fprintf(stderr, "Ignore");
+ break;
+ case Coerce:
+ fprintf(stderr, "Coerce Type=");
+ getCoerceToType()->print(llvm::errs());
+ break;
+ case Indirect:
+ fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
+ break;
+ case Expand:
+ fprintf(stderr, "Expand");
+ break;
+ }
+ fprintf(stderr, ")\n");
+}
+
+/***/
+
+static bool isEmptyRecord(ASTContext &Context, QualType T);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it is an
+/// unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+ // Constant arrays of empty records count as empty, strip them off.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+ FT = AT->getElementType();
+
+ return isEmptyRecord(Context, FT);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+static bool isEmptyRecord(ASTContext &Context, QualType T) {
+ const RecordType *RT = T->getAsRecordType();
+ if (!RT)
+    return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i)
+ if (!isEmptyField(Context, *i))
+ return false;
+ return true;
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The field declaration for the single non-empty field, if
+/// it exists.
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAsStructureType();
+ if (!RT)
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return 0;
+
+ const Type *Found = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return 0;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ return Found;
+}
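+
+// For example, 'struct S0 { float x; };' and
+// 'struct S1 { struct S0 s; int : 0; };' are single element structs with
+// element type 'float' (the unnamed bit-field is "empty"), while
+// 'struct S2 { float x, y; };' is not.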
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
+ ASTContext &Context) {
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
+    // know how to expand them yet, and the predicate for telling if a bitfield
+    // still counts as "basic" is more complicated than what we were doing
+    // previously.
+ if (FD->isBitField())
+ return false;
+ }
+
+ return true;
+}
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ ASTContext &Context;
+ bool IsDarwin;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+
+public:
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ X86_32ABIInfo(ASTContext &Context, bool d)
+ : ABIInfo(), Context(Context), IsDarwin(d) {}
+};
+}
+
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Type must be register sized.
+ if (!isRegisterSize(Size))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128- bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, or complex type, it is ok.
+ if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAsRecordType();
+ if (!RT) return false;
+
+  // Structure types are returned in a register if all of their fields
+  // would be returned in a register.
+ for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
+ e = RT->getDecl()->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ return false;
+ }
+
+ return true;
+}
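+
+// For example, 'struct { short a, b; }' (32 bits of builtin fields) is
+// returned in a register, while 'struct { char a, b, c; }' fails the
+// register-size check (24 bits) and is returned in memory.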
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (const VectorType *VT = RetTy->getAsVectorType()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwin) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
+ 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return ABIArgInfo::getDirect();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = RetTy->getAsStructureType())
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+
+ // Outside of Darwin, structs and unions are always indirect.
+ if (!IsDarwin && !RetTy->isAnyComplexType())
+ return ABIArgInfo::getIndirect(0);
+
+ // Classify "single element" structs as their element type.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
+ if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
+ if (BT->isIntegerType()) {
+          // We need to use the size of the structure; padding
+ // bit-fields can adjust that to be larger than the single
+ // element type.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
+ } else if (BT->getKind() == BuiltinType::Float) {
+ assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ "Unexpect single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
+ } else if (BT->getKind() == BuiltinType::Double) {
+ assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ "Unexpect single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
+ }
+ } else if (SeltTy->isPointerType()) {
+ // FIXME: It would be really nice if this could come out as the proper
+ // pointer type.
+ llvm::Type *PtrTy =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ return ABIArgInfo::getCoerce(PtrTy);
+ } else if (SeltTy->isVectorType()) {
+ // 64- and 128-bit vectors are never returned in a
+ // register when inside a structure.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size == 64 || Size == 128)
+ return ABIArgInfo::getIndirect(0);
+
+ return classifyReturnType(QualType(SeltTy, 0), Context);
+ }
+ }
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
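+
+// For example, on Darwin 'struct { float f; }' is classified as its single
+// element and coerced to an LLVM float, 'struct { int a, b; }' is register
+// sized (64 bits) and coerced to i64, and any struct return outside Darwin
+// is indirect.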
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ // FIXME: Set alignment on indirect arguments.
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = Ty->getAsStructureType())
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+
+ // Ignore empty structs.
+ uint64_t Size = Context.getTypeSize(Ty);
+ if (Ty->isStructureType() && Size == 0)
+ return ABIArgInfo::getIgnore();
+
+    // Expand structs with size <= 128 bits which consist only of
+ // basic types (int, long long, float, double, xxx*). This is
+ // non-recursive and does not ignore empty fields.
+ if (const RecordType *RT = Ty->getAsStructureType()) {
+ if (Context.getTypeSize(Ty) <= 4*32 &&
+ areAllFields32Or64BitBasicType(RT->getDecl(), Context))
+ return ABIArgInfo::getExpand();
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
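+
+// For example, for 'va_arg(ap, short)' the computed offset is
+// RoundUpToAlignment(2, 4) == 4, so the cursor always advances by at least
+// one 4-byte stack slot; for 'double' it advances by 8.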
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ Class merge(Class Accum, Class Field) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+  /// containing object. Some parameters are classified differently
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
+ void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const;
+
+ /// getCoerceResult - Given a source type \arg Ty and an LLVM type
+  /// to coerce to, choose the best way to pass Ty in the same place
+ /// that \arg CoerceTo would be passed, but while keeping the
+ /// emitted code as simple as possible.
+ ///
+ /// FIXME: Note, this should be cleaned up to just take an enumeration of all
+ /// the ways we might want to pass things, instead of constructing an LLVM
+  /// type. That would make this code more explicit, and it would make it
+  /// clearer that we are also doing this for correctness in the case of
+  /// passing scalar types.
+ ABIArgInfo getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const;
+
+  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ unsigned &neededInt,
+ unsigned &neededSSE) const;
+
+public:
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
+ Class Field) const {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ else if (Field == Memory)
+ return Memory;
+ else if (Accum == NoClass)
+ return Field;
+ else if (Accum == Integer || Field == Integer)
+ return Integer;
+ else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ else
+ return SSE;
+}
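+
+// For example, when classifying 'struct { int a; float b; }' both fields
+// land in the low eightbyte: merging NO_CLASS with INTEGER gives INTEGER
+// (rule b), and merging INTEGER with SSE gives INTEGER (rule d), so the
+// struct is passed in a general purpose register.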
+
+void X86_64ABIInfo::classify(QualType Ty,
+ ASTContext &Context,
+ uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ Lo = X87;
+ Hi = X87Up;
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ } else if (const EnumType *ET = Ty->getAsEnumType()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
+ } else if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ } else if (const VectorType *VT = Ty->getAsVectorType()) {
+ uint64_t Size = Context.getTypeSize(VT);
+ if (Size == 32) {
+ // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+ // float> as integer.
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+ if (EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128) {
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ } else if (const ComplexType *CT = Ty->getAsComplexType()) {
+ QualType ET = Context.getCanonicalType(CT->getElementType());
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ if (ET->isIntegralType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == Context.FloatTy)
+ Current = SSE;
+ else if (ET == Context.DoubleTy)
+ Lo = Hi = SSE;
+ else if (ET == Context.LongDoubleTy)
+ Current = ComplexX87;
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = Context.getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // Do post merger cleanup (see below). Only case we worry about is Memory.
+ if (Hi == Memory)
+ Lo = Memory;
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ } else if (const RecordType *RT = Ty->getAsRecordType()) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
+ Lo = Memory;
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+ FieldLo = FieldHi = NoClass;
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is MEMORY, the whole argument is
+ // passed in memory.
+ //
+  // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
+
+ // The first of these conditions is guaranteed by how we implement
+ // the merge (just bail).
+ //
+ // The second condition occurs in the case of unions; for example
+ // union { _Complex double; unsigned; }.
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+ }
+}
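+
+// Some sample classifications: 'long double' yields (X87, X87Up),
+// '_Complex double' yields (SSE, SSE), and 'struct { long a; double b; }'
+// yields (Integer, SSE), one class per eightbyte.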
+
+ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const {
+ if (CoerceTo == llvm::Type::Int64Ty) {
+ // Integer and pointer types will end up in a general purpose
+ // register.
+ if (Ty->isIntegralType() || Ty->isPointerType())
+ return ABIArgInfo::getDirect();
+
+ } else if (CoerceTo == llvm::Type::DoubleTy) {
+ // FIXME: It would probably be better to make CGFunctionInfo only map using
+ // canonical types than to canonize here.
+ QualType CTy = Context.getCanonicalType(Ty);
+
+ // Float and double end up in a single SSE reg.
+ if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
+ return ABIArgInfo::getDirect();
+
+ }
+
+ return ABIArgInfo::getCoerce(CoerceTo);
+}
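+
+// For example, a 'long' coerced to i64 is passed direct since it already
+// is an integer, while 'struct { int a, b; }' coerced to i64 really does
+// need the coercion.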
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ ASTContext &Context) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty))
+ return ABIArgInfo::getDirect();
+
+ // FIXME: Set alignment correctly.
+ return ABIArgInfo::getIndirect(0);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectResult(RetTy, Context);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = llvm::Type::Int64Ty; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = llvm::Type::DoubleTy; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::X86_FP80Ty; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
+ llvm::Type::X86_FP80Ty,
+ NULL);
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ assert(0 && "Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass: break;
+
+ case Integer:
+ ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
+ break;
+ case SSE:
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the upper half of the last used SSE register.
+ //
+  // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+  // If X87Up is preceded by X87, we don't need to do
+  // anything. However, in some cases with unions it may not be
+  // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87)
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ break;
+ }
+
+ return getCoerceResult(RetTy, ResType, Context);
+}
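+
+// For example, 'struct { long a; double b; }' returns Lo == Integer and
+// Hi == SSE, so the result type is the pair { i64, double } and the value
+// comes back in %rax and %xmm0.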
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
+ unsigned &neededInt,
+ unsigned &neededSSE) const {
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ return getIndirectResult(Ty, Context);
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+ ResType = llvm::Type::Int64Ty;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE:
+ ++neededSSE;
+ ResType = llvm::Type::DoubleTy;
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+  // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ assert(0 && "Invalid classification for hi word.");
+ break;
+
+ case NoClass: break;
+ case Integer:
+ ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
+ ++neededInt;
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
+ break;
+ }
+
+ return getCoerceResult(Ty, ResType, Context);
+}
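+
+// For the same 'struct { long a; double b; }' example, classification as
+// an argument reports neededInt == 1 and neededSSE == 1 and coerces to
+// { i64, double }.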
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type, Context);
+ }
+ }
+}
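+
+// For example, 'struct { long a, b; }' needs two integer registers; if the
+// hidden sret pointer and earlier arguments have left only one free, the
+// whole struct is demoted to being passed indirectly.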
+
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) {
+ llvm::Value *overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 8) {
+ // Note that we follow the ABI & gcc here, even though the type
+ // could in theory have an alignment greater than 16. This case
+ // shouldn't ever matter in practice.
+
+ // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+ llvm::Type::Int64Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
+ overflow_arg_area =
+ CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ overflow_arg_area->getType(),
+ "overflow_arg_area.align");
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Res;
+}
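+
+// For example, a 'long double' argument (class X87, 16 byte alignment) is
+// always fetched from the overflow area, and the (p + 15) & ~15 rounding
+// above realigns the area before the load.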
+
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+ ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
+ neededInt, neededSSE);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+  // NOTE: 304 is a typo in the ABI document; there are only
+  // (6 * 8 + 8 * 16) = 176 bytes of register save area.
+
+ llvm::Value *InRegs = 0;
+ llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+ llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs =
+ CGF.Builder.CreateICmpULE(gp_offset,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 48 - neededInt * 8),
+ "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ CGF.Builder.CreateICmpULE(fp_offset,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 176 - neededSSE * 16),
+ "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+ "reg_save_area");
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
+ const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ const llvm::Type *TyLo = ST->getElementType(0);
+ const llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
+ "Unexpected ABI info for mixed regs");
+ const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
+ llvm::Value *V =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
+ } else if (neededInt) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+      // area; we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi =
+ CGF.Builder.CreateGEP(RegAddrLo,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
+ const llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
+ const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
+ llvm::Type::DoubleTy,
+ NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ }
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
+ "vaarg.addr");
+ ResAddr->reserveOperandSpace(2);
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+
+ return ResAddr;
+}
+
+// ABI Info for PIC16
+class PIC16ABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+};
+
+ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ return ABIArgInfo::getDirect();
+}
+
+llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+class ARMABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ it->info = classifyArgumentType(it->type, Context);
+ }
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getDirect();
+ }
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
+ // FIXME: This doesn't handle alignment > 64 bits.
+ const llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ if (Context.getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::Int64Ty;
+ SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::Int32Ty;
+ SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
+ }
+ std::vector<const llvm::Type*> LLVMFields;
+ LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
+ const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
+ return ABIArgInfo::getCoerce(STy);
+}
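+
+// For example, a 12 byte struct with 4 byte alignment is coerced to the
+// packed struct type { [3 x i32] }.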
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size <= 32)
+ return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+const ABIInfo &CodeGenTypes::getABIInfo() const {
+ if (TheABIInfo)
+ return *TheABIInfo;
+
+ // For now we just cache this in the CodeGenTypes and don't bother
+ // to free it.
+ const char *TargetPrefix = getContext().Target.getTargetPrefix();
+ if (strcmp(TargetPrefix, "x86") == 0) {
+ bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin");
+ switch (getContext().Target.getPointerWidth(0)) {
+ case 32:
+ return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin));
+ case 64:
+ return *(TheABIInfo = new X86_64ABIInfo());
+ }
+ } else if (strcmp(TargetPrefix, "arm") == 0) {
+ // FIXME: Support for OABI?
+ return *(TheABIInfo = new ARMABIInfo());
+ } else if (strcmp(TargetPrefix, "pic16") == 0) {
+ return *(TheABIInfo = new PIC16ABIInfo());
+ }
+
+ return *(TheABIInfo = new DefaultABIInfo);
+}
+
+/***/
+
+CGFunctionInfo::CGFunctionInfo(QualType ResTy,
+ const llvm::SmallVector<QualType, 16> &ArgTys) {
+ NumArgs = ArgTys.size();
+ Args = new ArgInfo[1 + NumArgs];
+ Args[0].type = ResTy;
+ for (unsigned i = 0; i < NumArgs; ++i)
+ Args[1 + i].type = ArgTys[i];
+}
+
+/***/
+
+void CodeGenTypes::GetExpandedTypes(QualType Ty,
+ std::vector<const llvm::Type*> &ArgTys) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember() &&
+ "Cannot expand structure with flexible array.");
+
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+
+ QualType FT = FD->getType();
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ GetExpandedTypes(FT, ArgTys);
+ } else {
+ ArgTys.push_back(ConvertType(FT));
+ }
+ }
+}
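+
+// For example, expanding 'struct { int a; struct { float f; } b; }'
+// recurses through the nested struct and appends i32 and float to ArgTys.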
+
+llvm::Function::arg_iterator
+CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+ llvm::Function::arg_iterator AI) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
+ llvm::Value *Addr = LV.getAddress();
+ for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
+ e = RD->field_end(getContext()); i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, false, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ AI = ExpandTypeFromArgs(FT, LV, AI);
+ } else {
+ EmitStoreThroughLValue(RValue::get(AI), LV, FT);
+ ++AI;
+ }
+ }
+
+ return AI;
+}
+
+void
+CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+ llvm::SmallVector<llvm::Value*, 16> &Args) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
+ e = RD->field_end(getContext()); i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, false, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
+ } else {
+ RValue RV = EmitLoadOfLValue(LV, FT);
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+ Args.push_back(RV.getScalarVal());
+ }
+ }
+}
+
+/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
+/// a pointer to an object of type \arg Ty.
+///
+/// This safely handles the case when the src type is smaller than the
+/// destination type; in this situation the values of bits which are not
+/// present in the src are undefined.
+static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
+ const llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ const llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+ // If load is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+    // FIXME: Assert that we aren't truncating non-padding bits when we have
+    // access to that information.
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ return Load;
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ Store->setAlignment(1);
+ return CGF.Builder.CreateLoad(Tmp);
+ }
+}
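+
+// For example, loading an i64 from a pointer to { i32, i32 } takes the
+// cheap path: both types are 8 bytes, so the pointer is bitcast and loaded
+// directly without a temporary.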
+
+/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
+/// where the source and destination may have different types.
+///
+/// This safely handles the case when the src type is larger than the
+/// destination type; the upper bits of the src will be lost.
+static void CreateCoercedStore(llvm::Value *Src,
+ llvm::Value *DstPtr,
+ CodeGenFunction &CGF) {
+ const llvm::Type *SrcTy = Src->getType();
+ const llvm::Type *DstTy =
+ cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ // If store is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+    // FIXME: Assert that we aren't truncating non-padding bits when we have
+    // access to that information.
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
+ CGF.Builder.CreateStore(Src, Tmp);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ CGF.Builder.CreateStore(Load, DstPtr);
+ }
+}
+
+/***/
+
+bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
+ return FI.getReturnInfo().isIndirect();
+}
+
+const llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
+ std::vector<const llvm::Type*> ArgTys;
+
+ const llvm::Type *ResultType = 0;
+
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+
+ case ABIArgInfo::Direct:
+ ResultType = ConvertType(RetTy);
+ break;
+
+ case ABIArgInfo::Indirect: {
+ assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
+ ResultType = llvm::Type::VoidTy;
+ const llvm::Type *STy = ConvertType(RetTy);
+ ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ ResultType = llvm::Type::VoidTy;
+ break;
+
+ case ABIArgInfo::Coerce:
+ ResultType = RetAI.getCoerceToType();
+ break;
+ }
+
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ const ABIArgInfo &AI = it->info;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce:
+ ArgTys.push_back(AI.getCoerceToType());
+ break;
+
+ case ABIArgInfo::Indirect: {
+ // indirect arguments are always on the stack, which is addr space #0.
+ const llvm::Type *LTy = ConvertTypeForMem(it->type);
+ ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+ break;
+ }
+
+ case ABIArgInfo::Direct:
+ ArgTys.push_back(ConvertType(it->type));
+ break;
+
+ case ABIArgInfo::Expand:
+ GetExpandedTypes(it->type, ArgTys);
+ break;
+ }
+ }
+
+ return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
+}
+
+void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
+ const Decl *TargetDecl,
+ AttributeListType &PAL) {
+ unsigned FuncAttrs = 0;
+ unsigned RetAttrs = 0;
+
+ // FIXME: handle sseregparm someday...
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<NoThrowAttr>())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ if (TargetDecl->hasAttr<NoReturnAttr>())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+ if (TargetDecl->hasAttr<ConstAttr>())
+ FuncAttrs |= llvm::Attribute::ReadNone;
+ else if (TargetDecl->hasAttr<PureAttr>())
+ FuncAttrs |= llvm::Attribute::ReadOnly;
+ }
+
+ QualType RetTy = FI.getReturnType();
+ unsigned Index = 1;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Direct:
+ if (RetTy->isPromotableIntegerType()) {
+ if (RetTy->isSignedIntegerType()) {
+ RetAttrs |= llvm::Attribute::SExt;
+ } else if (RetTy->isUnsignedIntegerType()) {
+ RetAttrs |= llvm::Attribute::ZExt;
+ }
+ }
+ break;
+
+ case ABIArgInfo::Indirect:
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ llvm::Attribute::StructRet |
+ llvm::Attribute::NoAlias));
+ ++Index;
+ // sret disables readnone and readonly
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::Coerce:
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ if (RetAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+
+ // FIXME: we need to honour command line settings also...
+ // FIXME: RegParm should be reduced in case of nested functions and/or global
+ // register variable.
+ signed RegParm = 0;
+ if (TargetDecl)
+ if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>())
+ RegParm = RegParmAttr->getNumParams();
+
+ unsigned PointerWidth = getContext().Target.getPointerWidth(0);
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ QualType ParamType = it->type;
+ const ABIArgInfo &AI = it->info;
+ unsigned Attributes = 0;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Coerce:
+ break;
+
+ case ABIArgInfo::Indirect:
+ Attributes |= llvm::Attribute::ByVal;
+ Attributes |=
+ llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
+ // byval disables readnone and readonly.
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Direct:
+ if (ParamType->isPromotableIntegerType()) {
+ if (ParamType->isSignedIntegerType()) {
+ Attributes |= llvm::Attribute::SExt;
+ } else if (ParamType->isUnsignedIntegerType()) {
+ Attributes |= llvm::Attribute::ZExt;
+ }
+ }
+ if (RegParm > 0 &&
+ (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ RegParm -=
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ if (RegParm >= 0)
+ Attributes |= llvm::Attribute::InReg;
+ }
+ // FIXME: handle sseregparm someday...
+ break;
+
+ case ABIArgInfo::Ignore:
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Expand: {
+ std::vector<const llvm::Type*> Tys;
+ // FIXME: This is rather inefficient. Do we ever actually need to do
+ // anything here? The result should be just reconstructed on the other
+ // side, so extension should be a non-issue.
+ getTypes().GetExpandedTypes(ParamType, Tys);
+ Index += Tys.size();
+ continue;
+ }
+ }
+
+ if (Attributes)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
+ ++Index;
+ }
+ if (FuncAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+}
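+
+// For example, a function returning 'short' gets the SExt return attribute
+// (short is a promotable signed type), and an sret return adds a
+// StructRet|NoAlias attribute on the hidden first parameter.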
+
+void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ // FIXME: We no longer need the types from FunctionArgList; lift up and
+ // simplify.
+
+ // Emit allocs for param decls. Give the LLVM Argument nodes names.
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+
+ // Name the struct return argument.
+ if (CGM.ReturnTypeUsesSret(FI)) {
+ AI->setName("agg.result");
+ ++AI;
+ }
+
+ assert(FI.arg_size() == Args.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i, ++info_it) {
+ const VarDecl *Arg = i->first;
+ QualType Ty = info_it->type;
+ const ABIArgInfo &ArgI = info_it->info;
+
+ switch (ArgI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ llvm::Value* V = AI;
+ if (hasAggregateLLVMType(Ty)) {
+        // Do nothing; aggregates and complex variables are accessed by
+ // reference.
+ } else {
+ // Load scalar value from indirect argument.
+ V = EmitLoadOfScalar(V, false, Ty);
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+
+ case ABIArgInfo::Direct: {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value* V = AI;
+ if (hasAggregateLLVMType(Ty)) {
+ // Create a temporary alloca to hold the argument; the rest of
+ // codegen expects to access aggregates & complex values by
+ // reference.
+ V = CreateTempAlloca(ConvertTypeForMem(Ty));
+ Builder.CreateStore(AI, V);
+ } else {
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ std::string Name = Arg->getNameAsString();
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
+ (Name + ".addr").c_str());
+ // FIXME: What are the right qualifiers here?
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
+ EmitParmDecl(*Arg, Temp);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Name + "." + llvm::utostr(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty)) {
+ EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
+ } else {
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+ }
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Coerce: {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
+ // result in a new alloca anyway, so we could just store into that
+ // directly if we broke the abstraction down more.
+ llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
+ CreateCoercedStore(AI, V, *this);
+ // Match to what EmitParmDecl is expecting for this type.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ V = EmitLoadOfScalar(V, false, Ty);
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+ }
+
+ ++AI;
+ }
+ assert(AI == Fn->arg_end() && "Argument mismatch!");
+}
+
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
+ llvm::Value *ReturnValue) {
+ llvm::Value *RV = 0;
+
+ // Functions with no result always return void.
+ if (ReturnValue) {
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RetTy->isAnyComplexType()) {
+ ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+ StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
+ } else {
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+ false, RetTy);
+ }
+ break;
+
+ case ABIArgInfo::Direct:
+      // The internal return value temp will always have
+ // pointer-to-return-type type.
+ RV = Builder.CreateLoad(ReturnValue);
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce:
+ RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+ }
+
+ if (RV) {
+ Builder.CreateRet(RV);
+ } else {
+ Builder.CreateRetVoid();
+ }
+}
+
+RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
+ if (ArgType->isReferenceType())
+ return EmitReferenceBindingToExpr(E, ArgType);
+
+ return EmitAnyExprToTemp(E);
+}
+
+RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
+ llvm::Value *Callee,
+ const CallArgList &CallArgs,
+ const Decl *TargetDecl) {
+ // FIXME: We no longer need the types from CallArgs; lift up and simplify.
+ llvm::SmallVector<llvm::Value*, 16> Args;
+
+ // Handle struct-return functions by passing a pointer to the
+ // location that we would like to return into.
+ QualType RetTy = CallInfo.getReturnType();
+ const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+ if (CGM.ReturnTypeUsesSret(CallInfo)) {
+ // Create a temporary alloca to hold the result of the call. :(
+ Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
+ }
+
+ assert(CallInfo.arg_size() == CallArgs.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ I != E; ++I, ++info_it) {
+ const ABIArgInfo &ArgInfo = info_it->info;
+ RValue RV = I->first;
+
+ switch (ArgInfo.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RV.isScalar() || RV.isComplex()) {
+ // Make a temporary alloca to pass the argument.
+ Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
+ if (RV.isScalar())
+ EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
+ else
+ StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ } else {
+ Args.push_back(RV.getAggregateAddr());
+ }
+ break;
+
+ case ABIArgInfo::Direct:
+ if (RV.isScalar()) {
+ Args.push_back(RV.getScalarVal());
+ } else if (RV.isComplex()) {
+ llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
+ Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
+ Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
+ Args.push_back(Tmp);
+ } else {
+ Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ }
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce: {
+ // FIXME: Avoid the conversion through memory if possible.
+ llvm::Value *SrcPtr;
+ if (RV.isScalar()) {
+ SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
+ } else if (RV.isComplex()) {
+ SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
+ StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
+ } else
+ SrcPtr = RV.getAggregateAddr();
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ *this));
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ ExpandTypeToArgs(I->second, RV, Args);
+ break;
+ }
+ }
+
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.end());
+
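+  // Emit the call: use an invoke when there is an active landing pad and
+  // the callee is not known to be nounwind; otherwise a plain call suffices.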
+ llvm::CallSite CS;
+ if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
+ CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
+ } else {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
+ Args.data(), Args.data()+Args.size());
+ EmitBlock(Cont);
+ }
+
+ CS.setAttributes(Attrs);
+  if (const llvm::Function *F =
+        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
+ CS.setCallingConv(F->getCallingConv());
+
+ // If the call doesn't return, finish the basic block and clear the
+ // insertion point; this allows the rest of IRgen to discard
+ // unreachable code.
+ if (CS.doesNotReturn()) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+
+ // FIXME: For now, emit a dummy basic block because expr emitters in
+ // generally are not ready to handle emitting expressions at unreachable
+ // points.
+ EnsureInsertPoint();
+
+ // Return a reasonable RValue.
+ return GetUndefRValue(RetTy);
+ }
+
+ llvm::Instruction *CI = CS.getInstruction();
+ if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
+ CI->setName("call");
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(Args[0]);
+ return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
+
+ case ABIArgInfo::Direct:
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
+ Builder.CreateStore(CI, V);
+ return RValue::getAggregate(V);
+ }
+ return RValue::get(CI);
+
+ case ABIArgInfo::Ignore:
+    // Even though the call's result is being ignored at the ABI level, make
+    // sure to construct an appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Coerce: {
+ // FIXME: Avoid the conversion through memory if possible.
+ llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
+ CreateCoercedStore(CI, V, *this);
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(V, false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(V);
+ return RValue::get(EmitLoadOfScalar(V, false, RetTy));
+ }
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ assert(0 && "Unhandled ABIArgInfo::Kind");
+ return RValue::get(0);
+}
+
+/* VarArg handling */
+
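+// Lowering va_arg is inherently ABI-specific (the va_list layout varies by
+// target), so it is delegated to the target's ABIInfo implementation.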
+llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
+ return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
+}
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
new file mode 100644
index 0000000..daf6f00
--- /dev/null
+++ b/lib/CodeGen/CGCall.h
@@ -0,0 +1,104 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCALL_H
+#define CLANG_CODEGEN_CGCALL_H
+
+#include <llvm/ADT/FoldingSet.h>
+#include "clang/AST/Type.h"
+
+#include "CGValue.h"
+
+// FIXME: Restructure so we don't have to expose so much stuff.
+#include "ABIInfo.h"
+
+namespace llvm {
+ struct AttributeWithIndex;
+ class Function;
+ class Type;
+ class Value;
+
+ template<typename T, unsigned> class SmallVector;
+}
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+ class FunctionDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ typedef llvm::SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+
+ /// CallArgList - Type for representing both the value and type of
+ /// arguments in a call.
+ typedef llvm::SmallVector<std::pair<RValue, QualType>, 16> CallArgList;
+
+ /// FunctionArgList - Type for representing both the decl and type
+ /// of parameters to a function. The decl must be either a
+ /// ParmVarDecl or ImplicitParamDecl.
+ typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
+ 16> FunctionArgList;
+
+ /// CGFunctionInfo - Class to encapsulate the information about a
+ /// function definition.
+ class CGFunctionInfo : public llvm::FoldingSetNode {
+ struct ArgInfo {
+ QualType type;
+ ABIArgInfo info;
+ };
+
+ unsigned NumArgs;
+ ArgInfo *Args;
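+    // Args[0] holds the return type and its ABIArgInfo; the formal arguments
+    // occupy Args[1..NumArgs], which is why arg_begin() returns Args + 1.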
+
+ public:
+ typedef const ArgInfo *const_arg_iterator;
+ typedef ArgInfo *arg_iterator;
+
+ CGFunctionInfo(QualType ResTy,
+ const llvm::SmallVector<QualType, 16> &ArgTys);
+ ~CGFunctionInfo() { delete[] Args; }
+
+ const_arg_iterator arg_begin() const { return Args + 1; }
+ const_arg_iterator arg_end() const { return Args + 1 + NumArgs; }
+ arg_iterator arg_begin() { return Args + 1; }
+ arg_iterator arg_end() { return Args + 1 + NumArgs; }
+
+ unsigned arg_size() const { return NumArgs; }
+
+ QualType getReturnType() const { return Args[0].type; }
+
+ ABIArgInfo &getReturnInfo() { return Args[0].info; }
+ const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ getReturnType().Profile(ID);
+ for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
+ it->type.Profile(ID);
+ }
+ template<class Iterator>
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ QualType ResTy,
+ Iterator begin,
+ Iterator end) {
+ ResTy.Profile(ID);
+ for (; begin != end; ++begin)
+ begin->Profile(ID);
+ }
+ };
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
new file mode 100644
index 0000000..049e716
--- /dev/null
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -0,0 +1,987 @@
+//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the debug information generation while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/CompileOptions.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/System/Path.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace clang;
+using namespace clang::CodeGen;
+
+CGDebugInfo::CGDebugInfo(CodeGenModule *m)
+ : M(m), isMainCompileUnitCreated(false), DebugFactory(M->getModule()),
+ BlockLiteralGenericSet(false) {
+}
+
+CGDebugInfo::~CGDebugInfo() {
+ assert(RegionStack.empty() && "Region stack mismatch, stack not empty!");
+}
+
+void CGDebugInfo::setLocation(SourceLocation Loc) {
+ if (Loc.isValid())
+ CurLoc = M->getContext().getSourceManager().getInstantiationLoc(Loc);
+}
+
+/// getOrCreateCompileUnit - Get the compile unit from the cache or create a
+/// new one if necessary. Invalid source locations are mapped to the
+/// "<unknown>" file's compile unit.
+llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
+ // Get source file information.
+ const char *FileName = "<unknown>";
+ SourceManager &SM = M->getContext().getSourceManager();
+ unsigned FID = 0;
+ if (Loc.isValid()) {
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ FileName = PLoc.getFilename();
+ FID = PLoc.getIncludeLoc().getRawEncoding();
+ }
+
+ // See if this compile unit has been used before.
+ llvm::DICompileUnit &Unit = CompileUnitCache[FID];
+ if (!Unit.isNull()) return Unit;
+
+ // Get absolute path name.
+ llvm::sys::Path AbsFileName(FileName);
+ if (!AbsFileName.isAbsolute()) {
+ llvm::sys::Path tmp = llvm::sys::Path::GetCurrentDirectory();
+ tmp.appendComponent(FileName);
+ AbsFileName = tmp;
+ }
+
+  // See if this compile unit represents the main source file. Each source
+  // file has a corresponding compile unit; there is only one main source
+  // file at a time.
+ bool isMain = false;
+ const LangOptions &LO = M->getLangOptions();
+ const char *MainFileName = LO.getMainFileName();
+ if (isMainCompileUnitCreated == false) {
+ if (MainFileName) {
+ if (!strcmp(AbsFileName.getLast().c_str(), MainFileName))
+ isMain = true;
+ } else {
+ if (Loc.isValid() && SM.isFromMainFile(Loc))
+ isMain = true;
+ }
+ if (isMain)
+ isMainCompileUnitCreated = true;
+ }
+
+ unsigned LangTag;
+ if (LO.CPlusPlus) {
+ if (LO.ObjC1)
+ LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
+ else
+ LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+ } else if (LO.ObjC1) {
+ LangTag = llvm::dwarf::DW_LANG_ObjC;
+ } else if (LO.C99) {
+ LangTag = llvm::dwarf::DW_LANG_C99;
+ } else {
+ LangTag = llvm::dwarf::DW_LANG_C89;
+ }
+
+  std::string Producer = "clang 1.0"; // FIXME: clang version.
+ bool isOptimized = LO.Optimize;
+ const char *Flags = ""; // FIXME: Encode command line options.
+
+ // Figure out which version of the ObjC runtime we have.
+ unsigned RuntimeVers = 0;
+ if (LO.ObjC1)
+ RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+
+ // Create new compile unit.
+ return Unit = DebugFactory.CreateCompileUnit(LangTag, AbsFileName.getLast(),
+ AbsFileName.getDirname(),
+ Producer, isMain, isOptimized,
+ Flags, RuntimeVers);
+}
+
+/// CreateType - Create the debug type for a builtin type; caching is
+/// handled by getOrCreateType.
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
+ llvm::DICompileUnit Unit) {
+ unsigned Encoding = 0;
+ switch (BT->getKind()) {
+ default:
+ case BuiltinType::Void:
+ return llvm::DIType();
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
+ case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::Float:
+ case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
+ }
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(BT);
+ uint64_t Align = M->getContext().getTypeAlign(BT);
+ uint64_t Offset = 0;
+
+ return DebugFactory.CreateBasicType(Unit,
+ BT->getName(M->getContext().getLangOptions().CPlusPlus),
+ Unit, 0, Size, Align,
+ Offset, /*flags*/ 0, Encoding);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
+ llvm::DICompileUnit Unit) {
+ // Bit size, align and offset of the type.
+ unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
+ if (Ty->isComplexIntegerType())
+ Encoding = llvm::dwarf::DW_ATE_lo_user;
+
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+ uint64_t Offset = 0;
+
+ return DebugFactory.CreateBasicType(Unit, "complex",
+ Unit, 0, Size, Align,
+ Offset, /*flags*/ 0, Encoding);
+}
+
+/// CreateCVRType - Create the debug type for a CVR-qualified type, handling
+/// one qualifier per derived type.
+llvm::DIType CGDebugInfo::CreateCVRType(QualType Ty, llvm::DICompileUnit Unit) {
+ // We will create one Derived type for one qualifier and recurse to handle any
+ // additional ones.
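+  // For example, 'const volatile int' becomes a DW_TAG_const_type wrapping
+  // a DW_TAG_volatile_type wrapping the debug type for 'int'.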
+ llvm::DIType FromTy;
+ unsigned Tag;
+ if (Ty.isConstQualified()) {
+ Tag = llvm::dwarf::DW_TAG_const_type;
+ Ty.removeConst();
+ FromTy = getOrCreateType(Ty, Unit);
+ } else if (Ty.isVolatileQualified()) {
+ Tag = llvm::dwarf::DW_TAG_volatile_type;
+ Ty.removeVolatile();
+ FromTy = getOrCreateType(Ty, Unit);
+ } else {
+ assert(Ty.isRestrictQualified() && "Unknown type qualifier for debug info");
+ Tag = llvm::dwarf::DW_TAG_restrict_type;
+ Ty.removeRestrict();
+ FromTy = getOrCreateType(Ty, Unit);
+ }
+
+ // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+ // CVR derived types.
+ return DebugFactory.CreateDerivedType(Tag, Unit, "", llvm::DICompileUnit(),
+ 0, 0, 0, 0, 0, FromTy);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
+ llvm::DICompileUnit Unit) {
+ llvm::DIType EltTy = getOrCreateType(Ty->getPointeeType(), Unit);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
+ "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0, EltTy);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
+ llvm::DICompileUnit Unit) {
+ if (BlockLiteralGenericSet)
+ return BlockLiteralGeneric;
+
+ llvm::DICompileUnit DefUnit;
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+ llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+ llvm::DIType FieldTy;
+
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIArray Elements;
+ llvm::DIType EltTy, DescTy;
+
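+  // The layout being described, expressed as C structs:
+  //   struct __block_descriptor { unsigned long reserved, Size; };
+  //   struct __block_literal_generic {
+  //     void *__isa; int __flags; int __reserved;
+  //     void *__FuncPtr; struct __block_descriptor *__descriptor;
+  //   };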
+ FieldOffset = 0;
+ FType = M->getContext().UnsignedLongTy;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "reserved", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().UnsignedLongTy;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "Size", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ EltTys.clear();
+
+ EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
+ DefUnit, 0, FieldOffset, 0, 0, 0,
+ llvm::DIType(), Elements);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0, EltTy);
+
+ FieldOffset = 0;
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__isa", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().IntTy;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__flags", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().IntTy;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__reserved", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__FuncPtr", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = DescTy;
+ FieldSize = M->getContext().getTypeSize(Ty);
+ FieldAlign = M->getContext().getTypeAlign(Ty);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__descriptor", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
+ DefUnit, 0, FieldOffset, 0, 0, 0,
+ llvm::DIType(), Elements);
+
+ BlockLiteralGenericSet = true;
+ BlockLiteralGeneric
+ = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
+ "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0, EltTy);
+ return BlockLiteralGeneric;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
+ llvm::DICompileUnit Unit) {
+ // Typedefs are derived from some other type. If we have a typedef of a
+ // typedef, make sure to emit the whole chain.
+ llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
+
+ // We don't set size information, but do specify where the typedef was
+ // declared.
+ std::string TyName = Ty->getDecl()->getNameAsString();
+ SourceLocation DefLoc = Ty->getDecl()->getLocation();
+ llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(DefLoc);
+
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
+ unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef, Unit,
+ TyName, DefUnit, Line, 0, 0, 0, 0, Src);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
+ llvm::DICompileUnit Unit) {
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ // Add the result type at least.
+ EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
+
+ // Set up remainder of arguments if there is a prototype.
+ // FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
+ if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+ } else {
+ // FIXME: Handle () case in C. llvm-gcc doesn't do it either.
+ }
+
+ llvm::DIArray EltTypeArray =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ Unit, "", llvm::DICompileUnit(),
+ 0, 0, 0, 0, 0,
+ llvm::DIType(), EltTypeArray);
+}
+
+/// CreateType - Get the debug type for a structure, union, or class.
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
+ llvm::DICompileUnit Unit) {
+ RecordDecl *Decl = Ty->getDecl();
+
+ unsigned Tag;
+ if (Decl->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (Decl->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else {
+ assert(Decl->isClass() && "Unknown RecordType!");
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+
+ SourceManager &SM = M->getContext().getSourceManager();
+
+ // Get overall information about the record type for the debug info.
+ std::string Name = Decl->getNameAsString();
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ llvm::DICompileUnit DefUnit;
+ unsigned Line = 0;
+ if (!PLoc.isInvalid()) {
+ DefUnit = getOrCreateCompileUnit(Decl->getLocation());
+ Line = PLoc.getLine();
+ }
+
+ // Records and classes and unions can all be recursive. To handle them, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
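+  // A self-referential type such as 'struct Node { struct Node *next; };'
+  // is the motivating case: 'next' must resolve to the forward declaration
+  // while the member list is still being built.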
+ llvm::DIType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray());
+
+ // If this is just a forward declaration, return it.
+ if (!Decl->getDefinition(M->getContext()))
+ return FwdDecl;
+
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ const ASTRecordLayout &RL = M->getContext().getASTRecordLayout(Decl);
+
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator I = Decl->field_begin(M->getContext()),
+ E = Decl->field_end(M->getContext());
+ I != E; ++I, ++FieldNo) {
+ FieldDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+ std::string FieldName = Field->getNameAsString();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ SourceLocation FieldDefLoc = Field->getLocation();
+ PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
+ llvm::DICompileUnit FieldDefUnit;
+ unsigned FieldLine = 0;
+
+ if (!PLoc.isInvalid()) {
+ FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
+ FieldLine = PLoc.getLine();
+ }
+
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = M->getContext().getTypeSize(FType);
+ Expr *BitWidth = Field->getBitWidth();
+ if (BitWidth)
+ FieldSize = BitWidth->EvaluateAsInt(M->getContext()).getZExtValue();
+
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+ // Create a DW_TAG_member node to remember the offset of this field in the
+ // struct. FIXME: This is an absolutely insane way to capture this
+ // information. When we gut debug info, this should be fixed.
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ llvm::DIType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
+ Align, 0, 0, llvm::DIType(), Elements);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
+ FwdDecl.getGV()->eraseFromParent();
+
+ return RealDecl;
+}
+
+/// CreateType - Get the debug type for an Objective-C interface.
+llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
+ llvm::DICompileUnit Unit) {
+ ObjCInterfaceDecl *Decl = Ty->getDecl();
+
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+ SourceManager &SM = M->getContext().getSourceManager();
+
+ // Get overall information about the record type for the debug info.
+ std::string Name = Decl->getNameAsString();
+
+ llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(Decl->getLocation());
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ unsigned RuntimeLang = DefUnit.getLanguage();
+
+  // To handle a recursive interface, we first generate a debug descriptor
+  // for the interface as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+ llvm::DIType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray(),
+ RuntimeLang);
+
+ // If this is just a forward declaration, return it.
+ if (Decl->isForwardDecl())
+ return FwdDecl;
+
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ ObjCInterfaceDecl *SClass = Decl->getSuperClass();
+ if (SClass) {
+ llvm::DIType SClassTy =
+ getOrCreateType(M->getContext().getObjCInterfaceType(SClass), Unit);
+ llvm::DIType InhTag =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
+ Unit, "", llvm::DICompileUnit(), 0, 0, 0,
+ 0 /* offset */, 0, SClassTy);
+ EltTys.push_back(InhTag);
+ }
+
+ const ASTRecordLayout &RL = M->getContext().getASTObjCInterfaceLayout(Decl);
+
+ unsigned FieldNo = 0;
+ for (ObjCInterfaceDecl::ivar_iterator I = Decl->ivar_begin(),
+ E = Decl->ivar_end(); I != E; ++I, ++FieldNo) {
+ ObjCIvarDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+ std::string FieldName = Field->getNameAsString();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ SourceLocation FieldDefLoc = Field->getLocation();
+ llvm::DICompileUnit FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
+ PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
+ unsigned FieldLine = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = M->getContext().getTypeSize(FType);
+ Expr *BitWidth = Field->getBitWidth();
+ if (BitWidth)
+ FieldSize = BitWidth->EvaluateAsInt(M->getContext()).getZExtValue();
+
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+ unsigned Flags = 0;
+ if (Field->getAccessControl() == ObjCIvarDecl::Protected)
+ Flags = llvm::DIType::FlagProtected;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Private)
+ Flags = llvm::DIType::FlagPrivate;
+
+ // Create a DW_TAG_member node to remember the offset of this field in the
+ // struct. FIXME: This is an absolutely insane way to capture this
+ // information. When we gut debug info, this should be fixed.
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ llvm::DIType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
+ Align, 0, 0, llvm::DIType(), Elements,
+ RuntimeLang);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
+ FwdDecl.getGV()->eraseFromParent();
+
+ return RealDecl;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
+ llvm::DICompileUnit Unit) {
+ EnumDecl *Decl = Ty->getDecl();
+
+ llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
+
+ // Create DIEnumerator elements for each enumerator.
+ for (EnumDecl::enumerator_iterator
+ Enum = Decl->enumerator_begin(M->getContext()),
+ EnumEnd = Decl->enumerator_end(M->getContext());
+ Enum != EnumEnd; ++Enum) {
+ Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getNameAsString(),
+ Enum->getInitVal().getZExtValue()));
+ }
+
+ // Return a CompositeType for the enum itself.
+ llvm::DIArray EltArray =
+ DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
+
+ std::string EnumName = Decl->getNameAsString();
+ SourceLocation DefLoc = Decl->getLocation();
+ llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(DefLoc);
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
+ unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ // Size and align of the type.
+ uint64_t Size = 0;
+ unsigned Align = 0;
+ if (!Ty->isIncompleteType()) {
+ Size = M->getContext().getTypeSize(Ty);
+ Align = M->getContext().getTypeAlign(Ty);
+ }
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
+ Unit, EnumName, DefUnit, Line,
+ Size, Align, 0, 0,
+ llvm::DIType(), EltArray);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
+ llvm::DICompileUnit Unit) {
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return CreateType(RT, Unit);
+ else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
+ return CreateType(ET, Unit);
+
+ return llvm::DIType();
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
+ llvm::DICompileUnit Unit) {
+ uint64_t Size;
+ uint64_t Align;
+
+ // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ Size = 0;
+ Align =
+ M->getContext().getTypeAlign(M->getContext().getBaseElementType(VAT));
+ } else if (Ty->isIncompleteArrayType()) {
+ Size = 0;
+ Align = M->getContext().getTypeAlign(Ty->getElementType());
+ } else {
+ // Size and align of the whole array, not the element type.
+ Size = M->getContext().getTypeSize(Ty);
+ Align = M->getContext().getTypeAlign(Ty);
+ }
+
+ // Add the dimensions of the array. FIXME: This loses CV qualifiers from
+ // interior arrays, do we care? Why aren't nested arrays represented the
+ // obvious/recursive way?
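+  // For example, 'int a[2][3]' is described as a single composite with
+  // subranges [0,1] and [0,2] over element type 'int'.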
+ llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+ QualType EltTy(Ty, 0);
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ uint64_t Upper = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ Upper = CAT->getSize().getZExtValue() - 1;
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
+ EltTy = Ty->getElementType();
+ }
+
+ llvm::DIArray SubscriptArray =
+ DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type,
+ Unit, "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0,
+ getOrCreateType(EltTy, Unit),
+ SubscriptArray);
+}
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
+ llvm::DICompileUnit Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Check to see if the compile unit already has created this type.
+ llvm::DIType &Slot = TypeCache[Ty.getAsOpaquePtr()];
+ if (!Slot.isNull()) return Slot;
+
+ // Handle CVR qualifiers, which recursively handles what they refer to.
+ if (Ty.getCVRQualifiers())
+ return Slot = CreateCVRType(Ty, Unit);
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Dependent types cannot show up in debug information");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::ExtQual:
+ case Type::FixedWidthInt:
+ case Type::MemberPointer:
+ case Type::TemplateSpecialization:
+ case Type::QualifiedName:
+ // Unsupported types
+ return llvm::DIType();
+ case Type::ObjCQualifiedId: // Encode id<p> in debug info just like id.
+ return Slot = getOrCreateType(M->getContext().getObjCIdType(), Unit);
+
+ case Type::ObjCQualifiedInterface: // Drop protocols from interface.
+ case Type::ObjCInterface:
+ return Slot = CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+ case Type::Builtin: return Slot = CreateType(cast<BuiltinType>(Ty), Unit);
+ case Type::Complex: return Slot = CreateType(cast<ComplexType>(Ty), Unit);
+ case Type::Pointer: return Slot = CreateType(cast<PointerType>(Ty), Unit);
+ case Type::BlockPointer:
+ return Slot = CreateType(cast<BlockPointerType>(Ty), Unit);
+ case Type::Typedef: return Slot = CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Record:
+ case Type::Enum:
+ return Slot = CreateType(cast<TagType>(Ty), Unit);
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return Slot = CreateType(cast<FunctionType>(Ty), Unit);
+
+ case Type::ConstantArray:
+ case Type::VariableArray:
+ case Type::IncompleteArray:
+ return Slot = CreateType(cast<ArrayType>(Ty), Unit);
+ case Type::TypeOfExpr:
+ return Slot = getOrCreateType(cast<TypeOfExprType>(Ty)->getUnderlyingExpr()
+ ->getType(), Unit);
+ case Type::TypeOf:
+ return Slot = getOrCreateType(cast<TypeOfType>(Ty)->getUnderlyingType(),
+ Unit);
+ }
+
+ return Slot;
+}
+
+/// EmitFunctionStart - Constructs the debug code for entering a function -
+/// "llvm.dbg.func.start.".
+void CGDebugInfo::EmitFunctionStart(const char *Name, QualType ReturnType,
+ llvm::Function *Fn,
+ CGBuilderTy &Builder) {
+ const char *LinkageName = Name;
+
+ // Skip the asm prefix if it exists.
+ //
+ // FIXME: This should probably be the unmangled name?
+ if (Name[0] == '\01')
+ ++Name;
+
+ // FIXME: Why is this using CurLoc???
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
+ SourceManager &SM = M->getContext().getSourceManager();
+ unsigned LineNo = SM.getPresumedLoc(CurLoc).getLine();
+
+ llvm::DISubprogram SP =
+ DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
+ getOrCreateType(ReturnType, Unit),
+ Fn->hasInternalLinkage(), true/*definition*/);
+
+ DebugFactory.InsertSubprogramStart(SP, Builder.GetInsertBlock());
+
+ // Push function on region stack.
+ RegionStack.push_back(SP);
+}
+
+void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
+ if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
+
+ // Don't bother if things are the same as last time.
+ SourceManager &SM = M->getContext().getSourceManager();
+ if (CurLoc == PrevLoc
+ || (SM.getInstantiationLineNumber(CurLoc) ==
+ SM.getInstantiationLineNumber(PrevLoc)
+ && SM.isFromSameFile(CurLoc, PrevLoc)))
+ return;
+
+ // Update last state.
+ PrevLoc = CurLoc;
+
+ // Get the appropriate compile unit.
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
+ PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
+ DebugFactory.InsertStopPoint(Unit, PLoc.getLine(), PLoc.getColumn(),
+ Builder.GetInsertBlock());
+}
+
+/// EmitRegionStart - Constructs the debug code for entering a declarative
+/// region - "llvm.dbg.region.start.".
+void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
+ llvm::DIDescriptor D;
+ if (!RegionStack.empty())
+ D = RegionStack.back();
+ D = DebugFactory.CreateBlock(D);
+ RegionStack.push_back(D);
+ DebugFactory.InsertRegionStart(D, Builder.GetInsertBlock());
+}
+
+/// EmitRegionEnd - Constructs the debug code for exiting a declarative
+/// region - "llvm.dbg.region.end."
+void CGDebugInfo::EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+  // Provide a region stop point.
+ EmitStopPoint(Fn, Builder);
+
+ DebugFactory.InsertRegionEnd(RegionStack.back(), Builder.GetInsertBlock());
+ RegionStack.pop_back();
+}
+
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const VarDecl *Decl, unsigned Tag,
+ llvm::Value *Storage, CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ // Do not emit variable debug information while generating optimized code.
+ // The llvm optimizer and code generator are not yet ready to support
+ // optimized code debugging.
+ const CompileOptions &CO = M->getCompileOpts();
+ if (CO.OptimizationLevel)
+ return;
+
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
+ llvm::DIType Ty = getOrCreateType(Decl->getType(), Unit);
+
+ // Get location information.
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned Line = 0;
+ if (!PLoc.isInvalid())
+ Line = PLoc.getLine();
+ else
+ Unit = llvm::DICompileUnit();
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+    DebugFactory.CreateVariable(Tag, RegionStack.back(), Decl->getNameAsString(),
+ Unit, Line, Ty);
+ // Insert an llvm.dbg.declare into the current block.
+ DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+}
+
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *Decl,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder) {
+ EmitDeclare(Decl, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder);
+}
+
+/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+/// variable declaration.
+void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder) {
+ EmitDeclare(Decl, llvm::dwarf::DW_TAG_arg_variable, AI, Builder);
+}
+
+/// EmitGlobalVariable - Emit information about a global variable.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ const VarDecl *Decl) {
+
+ // Do not emit variable debug information while generating optimized code.
+ // The llvm optimizer and code generator are not yet ready to support
+ // optimized code debugging.
+ const CompileOptions &CO = M->getCompileOpts();
+ if (CO.OptimizationLevel)
+ return;
+
+ // Create global variable debug descriptor.
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ std::string Name = Decl->getNameAsString();
+
+ QualType T = Decl->getType();
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = M->getContext().getAsArrayType(T)->getElementType();
+
+ T = M->getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ DebugFactory.CreateGlobalVariable(Unit, Name, Name, "", Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(),
+ true/*definition*/, Var);
+}
+
+/// EmitGlobalVariable - Emit information about an objective-c interface.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ ObjCInterfaceDecl *Decl) {
+ // Create global variable debug descriptor.
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ std::string Name = Decl->getNameAsString();
+
+ QualType T = M->getContext().getObjCInterfaceType(Decl);
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = M->getContext().getAsArrayType(T)->getElementType();
+
+ T = M->getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ DebugFactory.CreateGlobalVariable(Unit, Name, Name, "", Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(),
+ true/*definition*/, Var);
+}
+
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
new file mode 100644
index 0000000..de65580
--- /dev/null
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -0,0 +1,126 @@
+//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the source-level debug info generator for LLVM translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
+#define CLANG_CODEGEN_CGDEBUGINFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include <map>
+
+#include "CGBuilder.h"
+
+namespace clang {
+ class VarDecl;
+ class ObjCInterfaceDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it, either to llvm globals or directly to
+/// the backend.
+class CGDebugInfo {
+ CodeGenModule *M;
+ bool isMainCompileUnitCreated;
+ llvm::DIFactory DebugFactory;
+
+ SourceLocation CurLoc, PrevLoc;
+
+ /// CompileUnitCache - Cache of previously constructed CompileUnits.
+ llvm::DenseMap<unsigned, llvm::DICompileUnit> CompileUnitCache;
+
+ /// TypeCache - Cache of previously constructed Types.
+ // FIXME: Eliminate this map. Be careful of iterator invalidation.
+ std::map<void *, llvm::DIType> TypeCache;
+
+ bool BlockLiteralGenericSet;
+ llvm::DIType BlockLiteralGeneric;
+
+ std::vector<llvm::DIDescriptor> RegionStack;
+
+ /// Helper functions for getOrCreateType.
+ llvm::DIType CreateType(const BuiltinType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const ComplexType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateCVRType(QualType Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const TypedefType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const PointerType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const FunctionType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const TagType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const RecordType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const EnumType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const ArrayType *Ty, llvm::DICompileUnit U);
+
+public:
+ CGDebugInfo(CodeGenModule *m);
+ ~CGDebugInfo();
+
+  /// setLocation - Update the current source location. If \arg Loc is
+  /// invalid it is ignored.
+ void setLocation(SourceLocation Loc);
+
+ /// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
+ /// source line.
+ void EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder);
+
+  /// EmitFunctionStart - Emit a call to llvm.dbg.func.start to indicate the
+  /// start of a new function.
+ void EmitFunctionStart(const char *Name, QualType ReturnType,
+ llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
+ /// of a new block.
+ void EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
+ /// block.
+ void EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
+ /// variable declaration.
+ void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+ /// variable declaration.
+ void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitGlobalVariable - Emit information about a global variable.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+
+ /// EmitGlobalVariable - Emit information about an objective-c interface.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
+
+private:
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// getOrCreateCompileUnit - Get the compile unit from the cache or create a
+ /// new one if necessary.
+ llvm::DICompileUnit getOrCreateCompileUnit(SourceLocation Loc);
+
+ /// getOrCreateType - Get the type from the cache or create a new type if
+ /// necessary.
+ llvm::DIType getOrCreateType(QualType Ty, llvm::DICompileUnit Unit);
+};
+} // namespace CodeGen
+} // namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 0000000..bcad77b
--- /dev/null
+++ b/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,489 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+void CodeGenFunction::EmitDecl(const Decl &D) {
+ switch (D.getKind()) {
+ default: assert(0 && "Unknown decl kind!");
+ case Decl::ParmVar:
+ assert(0 && "Parmdecls should not be in declstmts!");
+ case Decl::Function: // void X();
+ case Decl::Record: // struct/union/class X;
+ case Decl::Enum: // enum X;
+ case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::CXXRecord: // struct/union/class X; [C++]
+ // None of these decls require codegen support.
+ return;
+
+ case Decl::Var: {
+ const VarDecl &VD = cast<VarDecl>(D);
+ assert(VD.isBlockVarDecl() &&
+ "Should not see file-scope variables inside a function!");
+ return EmitBlockVarDecl(VD);
+ }
+
+ case Decl::Typedef: { // typedef int X;
+ const TypedefDecl &TD = cast<TypedefDecl>(D);
+ QualType Ty = TD.getUnderlyingType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ }
+ }
+}
+
+/// EmitBlockVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.
+void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
+ if (D.hasAttr<AsmLabelAttr>())
+ CGM.ErrorUnsupported(&D, "__asm__");
+
+ switch (D.getStorageClass()) {
+ case VarDecl::None:
+ case VarDecl::Auto:
+ case VarDecl::Register:
+ return EmitLocalBlockVarDecl(D);
+ case VarDecl::Static:
+ return EmitStaticBlockVarDecl(D);
+ case VarDecl::Extern:
+ case VarDecl::PrivateExtern:
+ // Don't emit it now, allow it to be emitted lazily on its first use.
+ return;
+ }
+
+ assert(0 && "Unknown storage class");
+}
+
+llvm::GlobalVariable *
+CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes
+ Linkage) {
+ QualType Ty = D.getType();
+ assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+ std::string Name;
+ if (getContext().getLangOptions().CPlusPlus) {
+ Name = CGM.getMangledName(&D);
+ } else {
+ std::string ContextName;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurFuncDecl))
+ ContextName = CGM.getMangledName(FD);
+ else if (isa<ObjCMethodDecl>(CurFuncDecl))
+ ContextName = std::string(CurFn->getNameStart(),
+ CurFn->getNameStart() + CurFn->getNameLen());
+ else
+ assert(0 && "Unknown context for block var decl");
+
+ Name = ContextName + Separator + D.getNameAsString();
+ }
+
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ return new llvm::GlobalVariable(LTy, Ty.isConstant(getContext()), Linkage,
+ llvm::Constant::getNullValue(LTy), Name,
+ &CGM.getModule(), D.isThreadSpecified(),
+ Ty.getAddressSpace());
+}
+
+void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+
+ llvm::GlobalVariable *GV =
+ CreateStaticBlockVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);
+
+ // Store into LocalDeclMap before generating initializer to handle
+ // circular references.
+ DMEntry = GV;
+
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (D.getType()->isVariablyModifiedType())
+ EmitVLASize(D.getType());
+
+ if (D.getType()->isReferenceType()) {
+ CGM.ErrorUnsupported(&D, "static declaration with reference type");
+ return;
+ }
+
+ if (D.getInit()) {
+ llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
+
+ // If constant emission failed, then this should be a C++ static
+ // initializer.
+ if (!Init) {
+ if (!getContext().getLangOptions().CPlusPlus)
+ CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else
+ GenerateStaticCXXBlockVarDeclInit(D, GV);
+ } else {
+ // The initializer may differ in type from the global. Rewrite
+ // the global to match the initializer. (We have to do this
+ // because some types, like unions, can't be completely represented
+ // in the LLVM type system.)
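+      // For example, a union initialized through one of its members may be
+      // emitted as a constant of that member's type rather than of the
+      // union's declared LLVM type.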
+ if (GV->getType() != Init->getType()) {
+ llvm::GlobalVariable *OldGV = GV;
+
+ GV = new llvm::GlobalVariable(Init->getType(), OldGV->isConstant(),
+ OldGV->getLinkage(), Init, "",
+ &CGM.getModule(), D.isThreadSpecified(),
+ D.getType().getAddressSpace());
+
+ // Steal the name of the old global
+ GV->takeName(OldGV);
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ OldGV->eraseFromParent();
+ }
+
+ GV->setInitializer(Init);
+ }
+ }
+
+ // FIXME: Merge attribute handling.
+ if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ llvm::Constant *Ann =
+ CGM.EmitAnnotateAttr(GV, AA,
+ SM.getInstantiationLineNumber(D.getLocation()));
+ CGM.AddAnnotation(Ann);
+ }
+
+ if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+
+ if (D.hasAttr<UsedAttr>())
+ CGM.AddUsedGlobal(GV);
+
+ // We may have to cast the constant because of the initializer
+ // mismatch above.
+ //
+ // FIXME: It is really dangerous to store this in the map; if anyone
+ // RAUW's the GV uses of this constant will be invalid.
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
+ const llvm::Type *LPtrTy =
+ llvm::PointerType::get(LTy, D.getType().getAddressSpace());
+ DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
+
+ // Emit global variable debug descriptor for static vars.
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(D.getLocation());
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
+ }
+}
+
+/// BuildByRefType - This routine changes a __block variable declared as T x
+/// into:
+///
+/// struct {
+/// void *__isa;
+/// void *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__copy_helper;
+/// void *__destroy_helper;
+/// T x;
+/// } x
+///
+/// Align is the alignment needed in bytes for x.
+const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty,
+ uint64_t Align) {
+ const llvm::Type *LTy = ConvertType(Ty);
+ bool needsCopyDispose = BlockRequiresCopying(Ty);
+ std::vector<const llvm::Type *> Types(needsCopyDispose*2+5);
+ const llvm::PointerType *PtrToInt8Ty
+ = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Types[0] = PtrToInt8Ty;
+ Types[1] = PtrToInt8Ty;
+ Types[2] = llvm::Type::Int32Ty;
+ Types[3] = llvm::Type::Int32Ty;
+ if (needsCopyDispose) {
+ Types[4] = PtrToInt8Ty;
+ Types[5] = PtrToInt8Ty;
+ }
+ // FIXME: Align this on at least an Align boundary.
+ Types[needsCopyDispose*2 + 4] = LTy;
+ return llvm::StructType::get(Types, false);
+}
+
+/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
+/// variable declaration with auto, register, or no storage class specifier.
+/// These turn into simple stack objects, or GlobalValues, depending on the target.
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
+ QualType Ty = D.getType();
+ bool isByRef = D.hasAttr<BlocksAttr>();
+ bool needsDispose = false;
+
+ llvm::Value *DeclPtr;
+ if (Ty->isConstantSizeType()) {
+ if (!Target.useGlobalsForAutomaticVariables()) {
+ // A normal fixed sized variable becomes an alloca in the entry block.
+ const llvm::Type *LTy = ConvertTypeForMem(Ty);
+ if (isByRef)
+ LTy = BuildByRefType(Ty, getContext().getDeclAlignInBytes(&D));
+ llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+ Alloc->setName(D.getNameAsString().c_str());
+
+ if (isByRef)
+ Alloc->setAlignment(std::max(getContext().getDeclAlignInBytes(&D),
+ unsigned(Target.getPointerAlign(0) / 8)));
+ else
+ Alloc->setAlignment(getContext().getDeclAlignInBytes(&D));
+ DeclPtr = Alloc;
+ } else {
+ // Targets that don't support recursion emit locals as globals.
+ const char *Class =
+ D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
+ DeclPtr = CreateStaticBlockVarDecl(D, Class,
+ llvm::GlobalValue
+ ::InternalLinkage);
+ }
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ } else {
+ if (!DidCallStackSave) {
+ // Save the stack.
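+      // A VLA is allocated with a dynamic alloca below; pairing llvm.stacksave
+      // here with llvm.stackrestore in a cleanup scope releases that stack
+      // memory when the scope is exited.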
+ const llvm::Type *LTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Value *V = Builder.CreateCall(F);
+
+ Builder.CreateStore(V, Stack);
+
+ DidCallStackSave = true;
+
+ {
+ // Push a cleanup block and restore the stack there.
+ CleanupScope scope(*this);
+
+ V = Builder.CreateLoad(Stack, "tmp");
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ Builder.CreateCall(F, V);
+ }
+ }
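+
+ // Illustrative example (not in the source): for "void f(int n) {
+ // int a[n]; int b[n]; }" only the first VLA emits llvm.stacksave;
+ // the pushed cleanup emits llvm.stackrestore so the VLA allocation is
+ // released when the enclosing scope exits.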
+
+ // Get the element type.
+ const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
+ const llvm::Type *LElemPtrTy =
+ llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
+
+ llvm::Value *VLASize = EmitVLASize(Ty);
+
+ // Downcast the VLA size expression to 32 bits.
+ VLASize = Builder.CreateIntCast(VLASize, llvm::Type::Int32Ty, false, "tmp");
+
+ // Allocate memory for the array.
+ llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::Int8Ty, VLASize, "vla");
+ DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for local var declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
+ }
+ else if (isByRef) {
+ llvm::Value *Loc;
+ bool needsCopyDispose = BlockRequiresCopying(Ty);
+ Loc = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc, false);
+ Loc = Builder.CreateBitCast(Loc, DeclPtr->getType());
+ Loc = Builder.CreateStructGEP(Loc, needsCopyDispose*2+4, "x");
+ DI->EmitDeclareOfAutoVariable(&D, Loc, Builder);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
+
+ // If this local has an initializer, emit it now.
+ if (const Expr *Init = D.getInit()) {
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef) {
+ bool needsCopyDispose = BlockRequiresCopying(Ty);
+ Loc = Builder.CreateStructGEP(DeclPtr, needsCopyDispose*2+4, "x");
+ }
+ if (Ty->isReferenceType()) {
+ llvm::Value *V = EmitReferenceBindingToExpr(Init, Ty).getScalarVal();
+ EmitStoreOfScalar(V, Loc, false, Ty);
+ } else if (!hasAggregateLLVMType(Init->getType())) {
+ llvm::Value *V = EmitScalarExpr(Init);
+ EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(),
+ D.getType());
+ } else if (Init->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified());
+ } else {
+ EmitAggExpr(Init, Loc, D.getType().isVolatileQualified());
+ }
+ }
+ if (isByRef) {
+ const llvm::PointerType *PtrToInt8Ty
+ = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+
+ llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
+ llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
+ llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
+ llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
+ llvm::Value *V;
+ int flag = 0;
+ int flags = 0;
+
+ needsDispose = true;
+
+ if (Ty->isBlockPointerType()) {
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ } else if (BlockRequiresCopying(Ty)) {
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ }
+
+ // FIXME: Someone double check this.
+ if (Ty.isObjCGCWeak())
+ flag |= BLOCK_FIELD_IS_WEAK;
+
+ int isa = 0;
+ if (flag&BLOCK_FIELD_IS_WEAK)
+ isa = 1;
+ V = llvm::ConstantInt::get(llvm::Type::Int32Ty, isa);
+ V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
+ Builder.CreateStore(V, isa_field);
+
+ V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding");
+ Builder.CreateStore(V, forwarding_field);
+
+ V = llvm::ConstantInt::get(llvm::Type::Int32Ty, flags);
+ Builder.CreateStore(V, flags_field);
+
+ const llvm::Type *V1;
+ V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
+ V = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ (CGM.getTargetData().getTypeStoreSizeInBits(V1)
+ / 8));
+ Builder.CreateStore(V, size_field);
+
+ if (flags & BLOCK_HAS_COPY_DISPOSE) {
+ BlockHasCopyDispose = true;
+ llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
+ Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag),
+ copy_helper);
+
+ llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
+ Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag),
+ destroy_helper);
+ }
+ }
+
+ // Handle the cleanup attribute
+ if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+ const FunctionDecl *FD = CA->getFunctionDecl();
+
+ llvm::Constant* F = CGM.GetAddrOfFunction(GlobalDecl(FD));
+ assert(F && "Could not find function!");
+
+ CleanupScope scope(*this);
+
+ const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
+
+ // In some cases, the type of the function argument will be different from
+ // the type of the pointer. An example of this is
+ // void f(void* arg);
+ // __attribute__((cleanup(f))) void *g;
+ //
+ // To fix this we insert a bitcast here.
+ QualType ArgTy = Info.arg_begin()->type;
+ DeclPtr = Builder.CreateBitCast(DeclPtr, ConvertType(ArgTy));
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(DeclPtr),
+ getContext().getPointerType(D.getType())));
+
+ EmitCall(Info, F, Args);
+ }
+
+ if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
+ CleanupScope scope(*this);
+ llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ V = Builder.CreateLoad(V, false);
+ BuildBlockRelease(V);
+ }
+}
+
+/// Emit an alloca (or GlobalValue depending on target)
+/// for the specified parameter and set up LocalDeclMap.
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
+ // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
+ assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
+ "Invalid argument to EmitParmDecl");
+ QualType Ty = D.getType();
+
+ llvm::Value *DeclPtr;
+ if (!Ty->isConstantSizeType()) {
+ // Variable sized values are always passed by reference.
+ DeclPtr = Arg;
+ } else {
+ // A fixed sized single-value variable becomes an alloca in the entry block.
+ const llvm::Type *LTy = ConvertTypeForMem(Ty);
+ if (LTy->isSingleValueType()) {
+ // TODO: Alignment
+ std::string Name = D.getNameAsString();
+ Name += ".addr";
+ DeclPtr = CreateTempAlloca(LTy);
+ DeclPtr->setName(Name.c_str());
+
+ // Store the initial value into the alloca.
+ EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(), Ty);
+ } else {
+ // Otherwise, if this is an aggregate, just use the input pointer.
+ DeclPtr = Arg;
+ }
+ Arg->setName(D.getNameAsString());
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for param declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
+ }
+}
+
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
new file mode 100644
index 0000000..c5f2387
--- /dev/null
+++ b/lib/CodeGen/CGExpr.cpp
@@ -0,0 +1,1324 @@
+//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCall.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===--------------------------------------------------------------------===//
+// Miscellaneous Helper Methods
+//===--------------------------------------------------------------------===//
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
+ const char *Name) {
+ if (!Builder.isNamePreserving())
+ Name = "";
+ return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
+}
+
+/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+ QualType BoolTy = getContext().BoolTy;
+ if (!E->getType()->isAnyComplexType())
+ return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
+
+ return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
+}
+
+/// EmitAnyExpr - Emit code to compute the specified expression which can have
+/// any type. The result is returned as an RValue struct. If this is an
+/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
+/// the result should be returned.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
+ bool isAggLocVolatile, bool IgnoreResult) {
+ if (!hasAggregateLLVMType(E->getType()))
+ return RValue::get(EmitScalarExpr(E, IgnoreResult));
+ else if (E->getType()->isAnyComplexType())
+ return RValue::getComplex(EmitComplexExpr(E, false, false,
+ IgnoreResult, IgnoreResult));
+
+ EmitAggExpr(E, AggLoc, isAggLocVolatile, IgnoreResult);
+ return RValue::getAggregate(AggLoc, isAggLocVolatile);
+}
+
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), except that the result
+/// will always be accessible even if no aggregate location is
+/// provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc,
+ bool isAggLocVolatile) {
+ if (!AggLoc && hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp");
+ return EmitAnyExpr(E, AggLoc, isAggLocVolatile);
+}
+
+RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+ QualType DestType) {
+ RValue Val;
+ if (E->isLvalue(getContext()) == Expr::LV_Valid) {
+ // Emit the expr as an lvalue.
+ LValue LV = EmitLValue(E);
+ if (LV.isSimple())
+ return RValue::get(LV.getAddress());
+ Val = EmitLoadOfLValue(LV, E->getType());
+ } else {
+ Val = EmitAnyExprToTemp(E);
+ }
+
+ if (Val.isAggregate()) {
+ Val = RValue::get(Val.getAggregateAddr());
+ } else {
+ // Create a temporary variable that we can bind the reference to.
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()),
+ "reftmp");
+ if (Val.isScalar())
+ EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
+ else
+ StoreComplexToAddr(Val.getComplexVal(), Temp, false);
+ Val = RValue::get(Temp);
+ }
+
+ return Val;
+}
+
+
+/// getAccessedFieldNo - Given an encoded value and a result number, return
+/// the input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+ const llvm::Constant *Elts) {
+ if (isa<llvm::ConstantAggregateZero>(Elts))
+ return 0;
+
+ return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
+}
+
+
+//===----------------------------------------------------------------------===//
+// LValue Expression Emission
+//===----------------------------------------------------------------------===//
+
+RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
+ if (Ty->isVoidType()) {
+ return RValue::get(0);
+ } else if (const ComplexType *CTy = Ty->getAsComplexType()) {
+ const llvm::Type *EltTy = ConvertType(CTy->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return RValue::getComplex(std::make_pair(U, U));
+ } else if (hasAggregateLLVMType(Ty)) {
+ const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
+ return RValue::getAggregate(llvm::UndefValue::get(LTy));
+ } else {
+ return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
+ }
+}
+
+RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ return GetUndefRValue(E->getType());
+}
+
+LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
+ return LValue::MakeAddr(llvm::UndefValue::get(Ty),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+/// EmitLValue - Emit code to compute a designator that specifies the location
+/// of the expression.
+///
+/// This can return one of two things: a simple address or a bitfield
+/// reference. In either case, the LLVM Value* in the LValue structure is
+/// guaranteed to be an LLVM pointer type.
+///
+/// If this returns a bitfield reference, nothing about the pointee type of
+/// the LLVM value is known: For example, it may not be a pointer to an
+/// integer.
+///
+/// If this returns a normal address, and if the lvalue's C type is fixed
+/// size, this method guarantees that the returned pointer type will point to
+/// an LLVM type of the same size as the lvalue's type. If the lvalue has a
+/// variable length type, this is not possible.
+///
+LValue CodeGenFunction::EmitLValue(const Expr *E) {
+ switch (E->getStmtClass()) {
+ default: return EmitUnsupportedLValue(E, "l-value expression");
+
+ case Expr::BinaryOperatorClass:
+ return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ return EmitCallExprLValue(cast<CallExpr>(E));
+ case Expr::VAArgExprClass:
+ return EmitVAArgExprLValue(cast<VAArgExpr>(E));
+ case Expr::DeclRefExprClass:
+ case Expr::QualifiedDeclRefExprClass:
+ return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+ case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::PredefinedExprClass:
+ return EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ case Expr::StringLiteralClass:
+ return EmitStringLiteralLValue(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
+
+ case Expr::BlockDeclRefExprClass:
+ return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
+
+ case Expr::CXXConditionDeclExprClass:
+ return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E));
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass:
+ return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
+ case Expr::CXXBindTemporaryExprClass:
+ return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+
+ case Expr::ObjCMessageExprClass:
+ return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
+ case Expr::ObjCIvarRefExprClass:
+ return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
+ case Expr::ObjCPropertyRefExprClass:
+ return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
+ case Expr::ObjCKVCRefExprClass:
+ return EmitObjCKVCRefLValue(cast<ObjCKVCRefExpr>(E));
+ case Expr::ObjCSuperExprClass:
+ return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));
+
+ case Expr::StmtExprClass:
+ return EmitStmtExprLValue(cast<StmtExpr>(E));
+ case Expr::UnaryOperatorClass:
+ return EmitUnaryOpLValue(cast<UnaryOperator>(E));
+ case Expr::ArraySubscriptExprClass:
+ return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::ExtVectorElementExprClass:
+ return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+ case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E));
+ case Expr::CompoundLiteralExprClass:
+ return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+ case Expr::ConditionalOperatorClass:
+ return EmitConditionalOperator(cast<ConditionalOperator>(E));
+ case Expr::ChooseExprClass:
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ return EmitCastLValue(cast<CastExpr>(E));
+ }
+}
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ QualType Ty) {
+ llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp");
+
+ // Bool can have different representation in memory than in registers.
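+ // (For example, a _Bool stored as i8 in memory loads as i8 and is
+ // truncated to i1 here.)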
+ if (Ty->isBooleanType())
+ if (V->getType() != llvm::Type::Int1Ty)
+ V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool");
+
+ return V;
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, QualType Ty) {
+
+ if (Ty->isBooleanType()) {
+ // Bool can have different representation in memory than in registers.
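+ // (For example, an i1 value stored into a slot created as i8: the
+ // address is recast to a pointer to the value's type so the store
+ // type-checks.)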
+ const llvm::Type *SrcTy = Value->getType();
+ const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
+ if (DstPtr->getElementType() != SrcTy) {
+ const llvm::Type *MemTy =
+ llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
+ Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
+ }
+ }
+
+ Builder.CreateStore(Value, Addr, Volatile);
+}
+
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+/// this method emits the address of the lvalue, then loads the result as an
+/// rvalue, returning the rvalue.
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
+ if (LV.isObjCWeak()) {
+ // load of a __weak object.
+ llvm::Value *AddrWeakObj = LV.getAddress();
+ llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ AddrWeakObj);
+ return RValue::get(read_weak);
+ }
+
+ if (LV.isSimple()) {
+ llvm::Value *Ptr = LV.getAddress();
+ const llvm::Type *EltTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+
+ // Simple scalar l-value.
+ if (EltTy->isSingleValueType())
+ return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
+ ExprType));
+
+ assert(ExprType->isFunctionType() && "Unknown scalar value");
+ return RValue::get(Ptr);
+ }
+
+ if (LV.isVectorElt()) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified(), "tmp");
+ return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
+ "vecext"));
+ }
+
+ // If this is a reference to a subset of the elements of a vector, either
+ // shuffle the input or extract/insert them as appropriate.
+ if (LV.isExtVectorElt())
+ return EmitLoadOfExtVectorElementLValue(LV, ExprType);
+
+ if (LV.isBitfield())
+ return EmitLoadOfBitfieldLValue(LV, ExprType);
+
+ if (LV.isPropertyRef())
+ return EmitLoadOfPropertyRefLValue(LV, ExprType);
+
+ assert(LV.isKVCRef() && "Unknown LValue type!");
+ return EmitLoadOfKVCRefLValue(LV, ExprType);
+}
+
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
+ QualType ExprType) {
+ unsigned StartBit = LV.getBitfieldStartBit();
+ unsigned BitfieldSize = LV.getBitfieldSize();
+ llvm::Value *Ptr = LV.getBitfieldAddr();
+
+ const llvm::Type *EltTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
+
+ // In some cases the bitfield may straddle two memory locations.
+ // Currently we load the entire bitfield, then do the magic to
+ // sign-extend it if necessary. This results in somewhat more code
+ // than necessary for the common case (one load), since two shifts
+ // accomplish both the masking and sign extension.
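+ //
+ // Worked example (illustrative): with a 32-bit storage unit, a
+ // bitfield of size 10 starting at bit 28 gives
+ // LowBits = min(10, 32 - 28) = 4, so HighBits = 6 more bits are
+ // fetched from the following unit below.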
+ unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
+ llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");
+
+ // Shift to proper location.
+ if (StartBit)
+ Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
+ "bf.lo");
+
+ // Mask off unused bits.
+ llvm::Constant *LowMask =
+ llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, LowBits));
+ Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");
+
+ // Fetch the high bits if necessary.
+ if (LowBits < BitfieldSize) {
+ unsigned HighBits = BitfieldSize - LowBits;
+ llvm::Value *HighPtr =
+ Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
+ "bf.ptr.hi");
+ llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
+ LV.isVolatileQualified(),
+ "tmp");
+
+ // Mask off unused bits.
+ llvm::Constant *HighMask =
+ llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, HighBits));
+ HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");
+
+ // Shift to proper location and or in to bitfield value.
+ HighVal = Builder.CreateShl(HighVal,
+ llvm::ConstantInt::get(EltTy, LowBits));
+ Val = Builder.CreateOr(Val, HighVal, "bf.val");
+ }
+
+ // Sign extend if necessary.
+ if (LV.isBitfieldSigned()) {
+ llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
+ EltTySize - BitfieldSize);
+ Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
+ ExtraBits, "bf.val.sext");
+ }
+
+ // The bitfield type and the normal type differ when the storage sizes
+ // differ (currently just _Bool).
+ Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");
+
+ return RValue::get(Val);
+}
+
+RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
+ QualType ExprType) {
+ return EmitObjCPropertyGet(LV.getPropertyRefExpr());
+}
+
+RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
+ QualType ExprType) {
+ return EmitObjCPropertyGet(LV.getKVCRefExpr());
+}
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
+ QualType ExprType) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified(), "tmp");
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+
+ // If the result of the expression is a non-vector type, we must be
+ // extracting a single element. Just codegen as an extractelement.
+ const VectorType *ExprVT = ExprType->getAsVectorType();
+ if (!ExprVT) {
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
+ return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
+ }
+
+ // Always use a shufflevector to try to retain the original program structure.
+ unsigned NumResultElts = ExprVT->getNumElements();
+
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumResultElts; ++i) {
+ unsigned InIdx = getAccessedFieldNo(i, Elts);
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
+ }
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(Vec,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV, "tmp");
+ return RValue::get(Vec);
+}
+
+
+
+/// EmitStoreThroughLValue - Store the specified rvalue into the specified
+/// lvalue, where both are guaranteed to have the same type, and that type
+/// is 'Ty'.
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+ QualType Ty) {
+ if (!Dst.isSimple()) {
+ if (Dst.isVectorElt()) {
+ // Read/modify/write the vector, inserting the new element.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified(), "tmp");
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getVectorIdx(), "vecins");
+ Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
+ return;
+ }
+
+ // If this is an update of extended vector elements, insert them as
+ // appropriate.
+ if (Dst.isExtVectorElt())
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);
+
+ if (Dst.isBitfield())
+ return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
+
+ if (Dst.isPropertyRef())
+ return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
+
+ if (Dst.isKVCRef())
+ return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
+
+ assert(0 && "Unknown LValue type");
+ }
+
+ if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+ // Store into a __weak object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+ // Store into a __strong object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+#if 0
+ // FIXME: We cannot positively determine if we have an 'ivar' assignment,
+ // an object assignment or an unknown assignment. For now, generate a call
+ // to the objc_assign_strongCast assignment, which is a safe but
+ // conservative assumption.
+ if (Dst.isObjCIvar())
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, LvalueDst);
+ else
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
+#endif
+ if (Dst.isGlobalObjCRef())
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
+ else
+ CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ assert(Src.isScalar() && "Can't emit an agg store with this method");
+ EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
+ Dst.isVolatileQualified(), Ty);
+}
+
+void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ QualType Ty,
+ llvm::Value **Result) {
+ unsigned StartBit = Dst.getBitfieldStartBit();
+ unsigned BitfieldSize = Dst.getBitfieldSize();
+ llvm::Value *Ptr = Dst.getBitfieldAddr();
+
+ const llvm::Type *EltTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
+
+ // Get the new value, cast to the appropriate type and masked to
+ // exactly the size of the bit-field.
+ llvm::Value *SrcVal = Src.getScalarVal();
+ llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
+ llvm::Constant *Mask =
+ llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
+ NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");
+
+ // Return the new value of the bit-field, if requested.
+ if (Result) {
+ // Cast back to the proper type for result.
+ const llvm::Type *SrcTy = SrcVal->getType();
+ llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
+ "bf.reload.val");
+
+ // Sign extend if necessary.
+ if (Dst.isBitfieldSigned()) {
+ unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
+ llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
+ SrcTySize - BitfieldSize);
+ SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
+ ExtraBits, "bf.reload.sext");
+ }
+
+ *Result = SrcTrunc;
+ }
+
+ // In some cases the bitfield may straddle two memory locations.
+ // Emit the low part first and check to see if the high needs to be
+ // done.
+ unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
+ llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
+ "bf.prev.low");
+
+ // Compute the mask for zero-ing the low part of this bitfield.
+ llvm::Constant *InvMask =
+ llvm::ConstantInt::get(~llvm::APInt::getBitsSet(EltTySize, StartBit,
+ StartBit + LowBits));
+
+ // Compute the new low part as
+ // LowVal = (LowVal & InvMask) | (NewVal << StartBit),
+ // with the shift of NewVal implicitly stripping the high bits.
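+ //
+ // Illustrative example: continuing the 32-bit case with StartBit = 28
+ // and BitfieldSize = 10, InvMask preserves bits [0,28), and
+ // (NewVal << 28) deposits the low 4 bits of the field into bits
+ // [28,32); the remaining 6 bits are written by the high part below.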
+ llvm::Value *NewLowVal =
+ Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
+ "bf.value.lo");
+ LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
+ LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");
+
+ // Write back.
+ Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());
+
+ // If the low part doesn't cover the bitfield, emit a high part.
+ if (LowBits < BitfieldSize) {
+ unsigned HighBits = BitfieldSize - LowBits;
+ llvm::Value *HighPtr =
+ Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
+ "bf.ptr.hi");
+ llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
+ Dst.isVolatileQualified(),
+ "bf.prev.hi");
+
+ // Compute the mask for zero-ing the high part of this bitfield.
+ llvm::Constant *InvMask =
+ llvm::ConstantInt::get(~llvm::APInt::getLowBitsSet(EltTySize, HighBits));
+
+ // Compute the new high part as
+ // HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
+ // where the high bits of NewVal have already been cleared and the
+ // shift stripping the low bits.
+ llvm::Value *NewHighVal =
+ Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
+ "bf.value.high");
+ HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
+ HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");
+
+ // Write back.
+ Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
+ }
+}
+
+void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
+}
+
+void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
+}
+
+void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ // This access turns into a read/modify/write of the vector. Load the input
+ // value now.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified(), "tmp");
+ const llvm::Constant *Elts = Dst.getExtVectorElts();
+
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (const VectorType *VTy = Ty->getAsVectorType()) {
+ unsigned NumSrcElts = VTy->getNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
+ if (NumDstElts == NumSrcElts) {
+ // Use a shuffle vector if the src and destination have the same number
+ // of elements.
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ unsigned InIdx = getAccessedFieldNo(i, Elts);
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
+ }
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV, "tmp");
+ }
+ else if (NumDstElts > NumSrcElts) {
+ // Extend the source vector to the same length and then shuffle it
+ // into the destination.
+ // FIXME: since we're shuffling with undef, can we just use the indices
+ // into that? This could be simpler.
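+ //
+ // Illustrative example: storing a 2-element source into elements
+ // {2, 0} of a 4-element vector first widens the source with
+ // ExtMask = <0, 1, undef, undef>, then uses Mask = <5, 1, 4, 3> to
+ // splice widened-source element 1 (index 5) into destination slot 0
+ // and element 0 (index 4) into destination slot 2.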
+ llvm::SmallVector<llvm::Constant*, 4> ExtMask;
+ unsigned i;
+ for (i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
+ for (; i != NumDstElts; ++i)
+ ExtMask.push_back(llvm::UndefValue::get(llvm::Type::Int32Ty));
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
+ ExtMask.size());
+ llvm::Value *ExtSrcVal =
+ Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(SrcVal->getType()),
+ ExtMaskV, "tmp");
+ // Build the identity shuffle mask.
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumDstElts; ++i) {
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
+ }
+ // Modify the entries where source elements get shuffled in.
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ unsigned Idx = getAccessedFieldNo(i, Elts);
+ Mask[Idx] =llvm::ConstantInt::get(llvm::Type::Int32Ty, i+NumDstElts);
+ }
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
+ }
+ else {
+ // We should never shorten the vector
+ assert(0 && "unexpected shorten vector length");
+ }
+ } else {
+ // If the Src is a scalar (not a vector) it must be updating one element.
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
+ Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
+ }
+
+ Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
+}
+
+LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
+ const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());
+
+ if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) ||
+ isa<ImplicitParamDecl>(VD))) {
+ LValue LV;
+ bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
+ if (VD->hasExternalStorage()) {
+ llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+ LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ }
+ else {
+ llvm::Value *V = LocalDeclMap[VD];
+ assert(V && "DeclRefExpr not entered in LocalDeclMap?");
+ // local variables do not get their gc attribute set.
+ QualType::GCAttrTypes attr = QualType::GCNone;
+ // local static?
+ if (!NonGCable)
+ attr = getContext().getObjCGCAttrKind(E->getType());
+ if (VD->hasAttr<BlocksAttr>()) {
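+ // (Mirrors the byref layout from BuildByRefType: load the
+ // __forwarding pointer at field index 1, then address the variable
+ // at field index needsCopyDispose*2 + 4.)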
+ bool needsCopyDispose = BlockRequiresCopying(VD->getType());
+ const llvm::Type *PtrStructTy = V->getType();
+ const llvm::Type *Ty = PtrStructTy;
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateStructGEP(V, 1, "forwarding");
+ V = Builder.CreateBitCast(V, Ty);
+ V = Builder.CreateLoad(V, false);
+ V = Builder.CreateBitCast(V, PtrStructTy);
+ V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
+ }
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+ LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), attr);
+ }
+ LValue::SetObjCNonGC(LV, NonGCable);
+ return LV;
+ } else if (VD && VD->isFileVarDecl()) {
+ llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+ LValue LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ if (LV.isObjCStrong())
+ LV.SetGlobalObjCRef(LV, true);
+ return LV;
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
+ llvm::Value* V = CGM.GetAddrOfFunction(GlobalDecl(FD));
+ if (!FD->hasPrototype()) {
+ if (const FunctionProtoType *Proto =
+ FD->getType()->getAsFunctionProtoType()) {
+ // Ugly case: for a K&R-style definition, the type of the definition
+ // isn't the same as the type of a use. Correct for this with a
+ // bitcast.
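+ //
+ // Illustrative example: for a K&R-style definition such as
+ // "int f(x) int x; { return x; }", a use of f has the unprototyped
+ // type "int ()", so the function address is bitcast to a pointer to
+ // that type here.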
+ QualType NoProtoType =
+ getContext().getFunctionNoProtoType(Proto->getResultType());
+ NoProtoType = getContext().getPointerType(NoProtoType);
+ V = Builder.CreateBitCast(V, ConvertType(NoProtoType), "tmp");
+ }
+ }
+ return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ }
+ else if (const ImplicitParamDecl *IPD =
+ dyn_cast<ImplicitParamDecl>(E->getDecl())) {
+ llvm::Value *V = LocalDeclMap[IPD];
+ assert(V && "BlockVarDecl not entered in LocalDeclMap?");
+ return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ }
+ assert(0 && "Unimp declref");
+ // Return an invalid LValue; the assert above ensures that this point
+ // is never reached.
+ return LValue();
+}
+
+LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
+ return LValue::MakeAddr(GetAddrOfBlockDecl(E),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
+ // __extension__ doesn't affect lvalue-ness.
+ if (E->getOpcode() == UnaryOperator::Extension)
+ return EmitLValue(E->getSubExpr());
+
+ QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
+ switch (E->getOpcode()) {
+ default: assert(0 && "Unknown unary operator lvalue!");
+ case UnaryOperator::Deref:
+ {
+ QualType T =
+ E->getSubExpr()->getType()->getAsPointerType()->getPointeeType();
+ LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()),
+ ExprTy->getAsPointerType()->getPointeeType()
+ .getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(T));
+ // We should not generate __weak write barrier on indirect reference
+ // of a pointer to object; as in void foo (__weak id *param); *param = 0;
+ // But, we continue to generate __strong write barrier on indirect write
+ // into a pointer to object.
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
+ LV.isObjCWeak())
+ LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ return LV;
+ }
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ LValue LV = EmitLValue(E->getSubExpr());
+ unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
+ return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
+ Idx, "idx"),
+ ExprTy.getCVRQualifiers());
+ }
+}
+
+LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
+ return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 0);
+}
+
+LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
+ return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 0);
+}
+
+
+LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
+ std::string GlobalVarName;
+
+ switch (Type) {
+ default:
+ assert(0 && "Invalid type");
+ case PredefinedExpr::Func:
+ GlobalVarName = "__func__.";
+ break;
+ case PredefinedExpr::Function:
+ GlobalVarName = "__FUNCTION__.";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ // FIXME: Demangle C++ method names.
+ GlobalVarName = "__PRETTY_FUNCTION__.";
+ break;
+ }
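+
+ // Illustrative example: inside "void foo(void)", __func__ lowers to
+ // the constant C string "foo" stored in a global named "__func__.foo".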
+
+ // FIXME: This isn't right at all. The logic for computing this should go
+ // into a method on PredefinedExpr. This would allow sema and codegen to be
+ // consistent for things like sizeof(__func__) etc.
+ std::string FunctionName;
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
+ FunctionName = CGM.getMangledName(FD);
+ } else {
+ // Just get the mangled name, skipping the asm prefix if it
+ // exists.
+ FunctionName = CurFn->getName();
+ if (FunctionName[0] == '\01')
+ FunctionName = FunctionName.substr(1, std::string::npos);
+ }
+
+ GlobalVarName += FunctionName;
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ return LValue::MakeAddr(C, 0);
+}
+
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+ switch (E->getIdentType()) {
+ default:
+ return EmitUnsupportedLValue(E, "predefined expression");
+ case PredefinedExpr::Func:
+ case PredefinedExpr::Function:
+ case PredefinedExpr::PrettyFunction:
+ return EmitPredefinedFunctionName(E->getIdentType());
+ }
+}
+
+LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // The index must always be an integer, which is not an aggregate. Emit it.
+ llvm::Value *Idx = EmitScalarExpr(E->getIdx());
+
+ // If the base is a vector type, then we are forming a vector element lvalue
+ // with this subscript.
+ if (E->getBase()->getType()->isVectorType()) {
+ // Emit the vector as an lvalue to get its address.
+ LValue LHS = EmitLValue(E->getBase());
+ assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
+ // FIXME: This should properly sign/zero/extend or truncate Idx to i32.
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx,
+ E->getBase()->getType().getCVRQualifiers());
+ }
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+
+ // Extend or truncate the index type to 32 or 64 bits.
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerType();
+ unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (IdxBitwidth != LLVMPointerWidth)
+ Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth),
+ IdxSigned, "idxprom");
+
+ // We know that the pointer points to a type of the correct size,
+ // unless the size is a VLA or Objective-C interface.
+ llvm::Value *Address = 0;
+ if (const VariableArrayType *VAT =
+ getContext().getAsVariableArrayType(E->getType())) {
+ llvm::Value *VLASize = VLASizeMap[VAT];
+
+ Idx = Builder.CreateMul(Idx, VLASize);
+
+ QualType BaseType = getContext().getBaseElementType(VAT);
+
+ uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
+ Idx = Builder.CreateUDiv(Idx,
+ llvm::ConstantInt::get(Idx->getType(),
+ BaseTypeSize));
+ Address = Builder.CreateGEP(Base, Idx, "arrayidx");
+ } else if (const ObjCInterfaceType *OIT =
+ dyn_cast<ObjCInterfaceType>(E->getType())) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ getContext().getTypeSize(OIT) / 8);
+
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+
+ llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
+ Idx, "arrayidx");
+ Address = Builder.CreateBitCast(Address, Base->getType());
+ } else {
+ Address = Builder.CreateGEP(Base, Idx, "arrayidx");
+ }
+
+ QualType T = E->getBase()->getType()->getAsPointerType()->getPointeeType();
+ LValue LV = LValue::MakeAddr(Address,
+ T.getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(T));
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC)
+ LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ return LV;
+}
+
+static
+llvm::Constant *GenerateConstantVector(llvm::SmallVector<unsigned, 4> &Elts) {
+ llvm::SmallVector<llvm::Constant *, 4> CElts;
+
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i)
+ CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, Elts[i]));
+
+ return llvm::ConstantVector::get(&CElts[0], CElts.size());
+}
+
+LValue CodeGenFunction::
+EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
+ // Emit the base vector as an l-value.
+ LValue Base;
+
+ // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+ if (!E->isArrow()) {
+ assert(E->getBase()->getType()->isVectorType());
+ Base = EmitLValue(E->getBase());
+ } else {
+ const PointerType *PT = E->getBase()->getType()->getAsPointerType();
+ llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+ Base = LValue::MakeAddr(Ptr, PT->getPointeeType().getCVRQualifiers());
+ }
+
+ // Encode the element access list into a vector of unsigned indices.
+ llvm::SmallVector<unsigned, 4> Indices;
+ E->getEncodedElementAccess(Indices);
+
+ if (Base.isSimple()) {
+ llvm::Constant *CV = GenerateConstantVector(Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV,
+ Base.getQualifiers());
+ }
+ assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
+ llvm::Constant *BaseElts = Base.getExtVectorElts();
+ llvm::SmallVector<llvm::Constant *, 4> CElts;
+
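+ // Illustrative example: if the base is v.yx (element list <1, 0>) and
+ // we access .x (index 0), the new element list is <1>, i.e. v.y.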
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
+ if (isa<llvm::ConstantAggregateZero>(BaseElts))
+ CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ else
+ CElts.push_back(BaseElts->getOperand(Indices[i]));
+ }
+ llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
+ Base.getQualifiers());
+}
+
+LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
+ bool isUnion = false;
+ bool isIvar = false;
+ bool isNonGC = false;
+ Expr *BaseExpr = E->getBase();
+ llvm::Value *BaseValue = NULL;
+ unsigned CVRQualifiers=0;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy =
+ BaseExpr->getType()->getAsPointerType();
+ if (PTy->getPointeeType()->isUnionType())
+ isUnion = true;
+ CVRQualifiers = PTy->getPointeeType().getCVRQualifiers();
+ } else if (isa<ObjCPropertyRefExpr>(BaseExpr) ||
+ isa<ObjCKVCRefExpr>(BaseExpr)) {
+ RValue RV = EmitObjCPropertyGet(BaseExpr);
+ BaseValue = RV.getAggregateAddr();
+ if (BaseExpr->getType()->isUnionType())
+ isUnion = true;
+ CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ if (BaseLV.isObjCIvar())
+ isIvar = true;
+ if (BaseLV.isNonGC())
+ isNonGC = true;
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ if (BaseExpr->getType()->isUnionType())
+ isUnion = true;
+ CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
+ }
+
+ FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl());
+ // FIXME: Handle non-field member expressions
+ assert(Field && "No code generation for non-field member references");
+ LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion,
+ CVRQualifiers);
+ LValue::SetObjCIvar(MemExpLV, isIvar);
+ LValue::SetObjCNonGC(MemExpLV, isNonGC);
+ return MemExpLV;
+}
+
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
+ FieldDecl* Field,
+ unsigned CVRQualifiers) {
+ unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+ // FIXME: CodeGenTypes should expose a method to get the appropriate type for
+ // FieldTy (the appropriate type is ABI-dependent).
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(Field->getType());
+ const llvm::PointerType *BaseTy =
+ cast<llvm::PointerType>(BaseValue->getType());
+ unsigned AS = BaseTy->getAddressSpace();
+ BaseValue = Builder.CreateBitCast(BaseValue,
+ llvm::PointerType::get(FieldTy, AS),
+ "tmp");
+ llvm::Value *V = Builder.CreateGEP(BaseValue,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, idx),
+ "tmp");
+
+ CodeGenTypes::BitFieldInfo bitFieldInfo =
+ CGM.getTypes().getBitFieldInfo(Field);
+ return LValue::MakeBitfield(V, bitFieldInfo.Begin, bitFieldInfo.Size,
+ Field->getType()->isSignedIntegerType(),
+ Field->getType().getCVRQualifiers()|CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
+ FieldDecl* Field,
+ bool isUnion,
+ unsigned CVRQualifiers)
+{
+ if (Field->isBitField())
+ return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
+
+ unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
+
+ // Match union field type.
+ if (isUnion) {
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(Field->getType());
+ const llvm::PointerType * BaseTy =
+ cast<llvm::PointerType>(BaseValue->getType());
+ unsigned AS = BaseTy->getAddressSpace();
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::get(FieldTy, AS),
+ "tmp");
+ }
+ if (Field->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+
+ QualType::GCAttrTypes attr = QualType::GCNone;
+ if (CGM.getLangOptions().ObjC1 &&
+ CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ QualType Ty = Field->getType();
+ attr = Ty.getObjCGCAttr();
+ if (attr != QualType::GCNone) {
+ // __weak attribute on a field is ignored.
+ if (attr == QualType::Weak)
+ attr = QualType::GCNone;
+ }
+ else if (getContext().isObjCObjectPointerType(Ty))
+ attr = QualType::Strong;
+ }
+ LValue LV =
+ LValue::MakeAddr(V,
+ Field->getType().getCVRQualifiers()|CVRQualifiers,
+ attr);
+ return LV;
+}
+
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
+ const llvm::Type *LTy = ConvertType(E->getType());
+ llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral");
+
+ const Expr* InitExpr = E->getInitializer();
+ LValue Result = LValue::MakeAddr(DeclPtr, E->getType().getCVRQualifiers());
+
+ if (E->getType()->isComplexType()) {
+ EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
+ } else if (hasAggregateLLVMType(E->getType())) {
+ EmitAnyExpr(InitExpr, DeclPtr, false);
+ } else {
+ EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
+ }
+
+ return Result;
+}
+
+LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) {
+ // We don't handle vectors yet.
+ if (E->getType()->isVectorType())
+ return EmitUnsupportedLValue(E, "conditional operator");
+
+ // ?: here should be an aggregate.
+ assert((hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType()) &&
+ "Unexpected conditional operator!");
+
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAggExpr(E, Temp, false);
+
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+
+}
+
+/// EmitCastLValue - Casts are never lvalues. If a cast is needed by the code
+/// generator in an lvalue context, then it must mean that we need the address
+/// of an aggregate in order to access one of its fields. This can happen for
+/// all the reasons that casts are permitted with aggregate results,
+/// including noop aggregate casts and casts from scalar to union.
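+/// (Illustrative example: the GNU extension "(union u)x" is a
+/// scalar-to-union cast; it is emitted below by storing x into a
+/// temporary of the union type.)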
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+ // If this is an aggregate-to-aggregate cast, just use the input's address as
+ // the lvalue.
+ if (getContext().hasSameUnqualifiedType(E->getType(),
+ E->getSubExpr()->getType()))
+ return EmitLValue(E->getSubExpr());
+
+ // Otherwise, we must have a cast from scalar to union.
+ assert(E->getType()->isUnionType() && "Expected scalar-to-union cast");
+
+ // Casts are only lvalues when the source and destination types are the same.
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAnyExpr(E->getSubExpr(), Temp, false);
+
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+//===--------------------------------------------------------------------===//
+// Expression Emission
+//===--------------------------------------------------------------------===//
+
+
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
+ // Builtins never have block type.
+ if (E->getCallee()->getType()->isBlockPointerType())
+ return EmitBlockCallExpr(E);
+
+ if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
+ return EmitCXXMemberCallExpr(CE);
+
+ const Decl *TargetDecl = 0;
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ TargetDecl = DRE->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
+ if (unsigned builtinID = FD->getBuiltinID(getContext()))
+ return EmitBuiltinExpr(FD, builtinID, E);
+ }
+ }
+
+ if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ return EmitCXXOperatorMemberCallExpr(CE, MD);
+ }
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ return EmitCall(Callee, E->getCallee()->getType(),
+ E->arg_begin(), E->arg_end(), TargetDecl);
+}
+
+LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
+ // Comma expressions just emit their LHS then their RHS as an l-value.
+ if (E->getOpcode() == BinaryOperator::Comma) {
+ EmitAnyExpr(E->getLHS());
+ return EmitLValue(E->getRHS());
+ }
+
+ // Can only get l-value for binary operator expressions which are a
+ // simple assignment of aggregate type.
+ if (E->getOpcode() != BinaryOperator::Assign)
+ return EmitUnsupportedLValue(E, "binary l-value expression");
+
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAggExpr(E, Temp, false);
+ // FIXME: Are these qualifiers correct?
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
+ RValue RV = EmitCallExpr(E);
+
+ if (RV.isScalar()) {
+ assert(E->getCallReturnType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ }
+
+ return LValue::MakeAddr(RV.getAggregateAddr(),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
+ // FIXME: This shouldn't require another copy.
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAggExpr(E, Temp, false);
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
+}
+
+LValue
+CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) {
+ EmitLocalBlockVarDecl(*E->getVarDecl());
+ return EmitDeclRefLValue(E);
+}
+
+LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp");
+ EmitCXXConstructExpr(Temp, E);
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
+}
+
+LValue
+CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ PushCXXTemporary(E->getTemporary(), LV.getAddress());
+
+ return LV;
+}
+
+LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
+ // Can only get an l-value for a message expression returning an
+ // aggregate type.
+ RValue RV = EmitObjCMessageExpr(E);
+ // FIXME: can this be volatile?
+ return LValue::MakeAddr(RV.getAggregateAddr(),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
+}
+
+LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
+ Ivar, CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
+ // FIXME: A lot of the code below could be shared with EmitMemberExpr.
+ llvm::Value *BaseValue = 0;
+ const Expr *BaseExpr = E->getBase();
+ unsigned CVRQualifiers = 0;
+ QualType ObjectTy;
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAsPointerType();
+ ObjectTy = PTy->getPointeeType();
+ CVRQualifiers = ObjectTy.getCVRQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ ObjectTy = BaseExpr->getType();
+ CVRQualifiers = ObjectTy.getCVRQualifiers();
+ }
+
+ return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), CVRQualifiers);
+}
+
+LValue
+CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or
+ // store through it.
+ return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
+}
+
+LValue
+CodeGenFunction::EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or
+ // store through it.
+ return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
+}
+
+LValue
+CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
+ return EmitUnsupportedLValue(E, "use of super");
+}
+
+LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
+ // Can only get an l-value for a statement expression returning an
+ // aggregate type.
+ RValue RV = EmitAnyExprToTemp(E);
+ // FIXME: can this be volatile?
+ return LValue::MakeAddr(RV.getAggregateAddr(),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+
+RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl) {
+ // Get the actual function type. The callee type will always be a
+ // pointer to function type or a block pointer type.
+ assert(CalleeType->isFunctionPointerType() &&
+ "Call must have function pointer type!");
+
+ QualType FnType = CalleeType->getAsPointerType()->getPointeeType();
+ QualType ResultType = FnType->getAsFunctionType()->getResultType();
+
+ CallArgList Args;
+ EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd);
+
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
+ Callee, Args, TargetDecl);
+}
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
new file mode 100644
index 0000000..469c830
--- /dev/null
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -0,0 +1,554 @@
+//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Aggregate Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ llvm::Value *DestPtr;
+ bool VolatileDest;
+ bool IgnoreResult;
+
+public:
+ AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
+ bool ignore)
+ : CGF(cgf), Builder(CGF.Builder),
+ DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
+  /// represents an lvalue, this method emits the address of the lvalue,
+  /// then loads the result into DestPtr.
+ void EmitAggLoadOfLValue(const Expr *E);
+
+ /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+ void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
+ void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ void VisitStmt(Stmt *S) {
+ CGF.ErrorUnsupported(S, "aggregate expression");
+ }
+ void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+ void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+
+ // l-values.
+ void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
+ void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
+ void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
+ void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ // Operators.
+ void VisitCStyleCastExpr(CStyleCastExpr *E);
+ void VisitImplicitCastExpr(ImplicitCastExpr *E);
+ void VisitCallExpr(const CallExpr *E);
+ void VisitStmtExpr(const StmtExpr *E);
+ void VisitBinaryOperator(const BinaryOperator *BO);
+ void VisitBinAssign(const BinaryOperator *E);
+ void VisitBinComma(const BinaryOperator *E);
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
+ void VisitObjCKVCRefExpr(ObjCKVCRefExpr *E);
+
+ void VisitConditionalOperator(const ConditionalOperator *CO);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ Visit(DAE->getExpr());
+ }
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+
+ void VisitVAArgExpr(VAArgExpr *E);
+
+ void EmitInitializationToLValue(Expr *E, LValue Address);
+ void EmitNullInitializationToLValue(LValue Address, QualType T);
+ // case Expr::ChooseExprClass:
+
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitAggLoadOfLValue - Given an expression with aggregate type that
+/// represents an lvalue, this method emits the address of the lvalue,
+/// then loads the result into DestPtr.
+void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
+ assert(Src.isAggregate() && "value must be aggregate value!");
+
+ // If the result is ignored, don't copy from the value.
+ if (DestPtr == 0) {
+ if (!Src.isVolatileQualified() || (IgnoreResult && Ignore))
+ return;
+ // If the source is volatile, we must read from it; to do that, we need
+ // some place to put it.
+ DestPtr = CGF.CreateTempAlloca(CGF.ConvertType(E->getType()), "agg.tmp");
+ }
+
+  // Otherwise, copy the source aggregate into the destination.
+  // FIXME: Pass VolatileDest as well.  I think we also need to merge the
+  // volatile flag from the source, as we can't eliminate the copy if either
+  // operand is volatile, unless the copy is volatile for both source and
+  // destination.
+ CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
+ VolatileDest|Src.isVolatileQualified());
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
+ assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
+
+ EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
+ Src.isVolatileQualified()),
+ Ignore);
+}
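+
+// Example (illustrative): given 'volatile struct S v;', the expression
+// statement 'v;' has an unused result (DestPtr == 0), but the volatile
+// qualifier still requires a read, so a temporary 'agg.tmp' is allocated
+// above to give the mandatory load somewhere to land.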
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+void AggExprEmitter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ // GCC union extension
+ if (E->getType()->isUnionType()) {
+ RecordDecl *SD = E->getType()->getAsRecordType()->getDecl();
+ LValue FieldLoc = CGF.EmitLValueForField(DestPtr,
+ *SD->field_begin(CGF.getContext()),
+ true, 0);
+ EmitInitializationToLValue(E->getSubExpr(), FieldLoc);
+ return;
+ }
+
+ Visit(E->getSubExpr());
+}
+
+void AggExprEmitter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+ E->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(E->getSubExpr());
+}
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType()) {
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ RValue RV = CGF.EmitCallExpr(E);
+ EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ RValue RV = CGF.EmitObjCMessageExpr(E);
+ EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ RValue RV = CGF.EmitObjCPropertyGet(E);
+ EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ RValue RV = CGF.EmitObjCPropertyGet(E);
+ EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitAnyExpr(E->getLHS(), 0, false, true);
+ CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest);
+}
+
+void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
+}
+
+void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
+ CGF.ErrorUnsupported(E, "aggregate binary expression");
+}
+
+void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType())
+ && "Invalid assignment");
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+  // We have to special-case property setters; otherwise we must have
+  // a simple lvalue (no aggregates inside vectors or bitfields).
+ if (LHS.isPropertyRef()) {
+ llvm::Value *AggLoc = DestPtr;
+ if (!AggLoc)
+ AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
+ CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
+ RValue::getAggregate(AggLoc, VolatileDest));
+ }
+ else if (LHS.isKVCRef()) {
+ llvm::Value *AggLoc = DestPtr;
+ if (!AggLoc)
+ AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
+ CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
+ RValue::getAggregate(AggLoc, VolatileDest));
+ } else {
+ // Codegen the RHS so that it stores directly into the LHS.
+ CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified());
+ EmitFinalDestCopy(E, LHS, true);
+ }
+}
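+
+// Example (illustrative): a plain struct assignment 's1 = s2;' emits the RHS
+// directly into the storage of 's1'.  An Objective-C property assignment
+// such as 'obj.prop = s;' has no stable address, so the RHS goes into a
+// temporary that is handed to EmitObjCPropertySet as an aggregate RValue.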
+
+void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
+ Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+
+ // Handle the GNU extension for missing LHS.
+ assert(E->getLHS() && "Must have LHS for aggregate value");
+
+ Visit(E->getLHS());
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+
+ Visit(E->getRHS());
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+}
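+
+// Example (illustrative): for 'dst = cond ? a : b;' with struct operands,
+// each arm stores its value into DestPtr, so unlike the scalar and complex
+// emitters no PHI node is needed; control simply rejoins at 'cond.end'.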
+
+void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
+ return;
+ }
+
+ EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, 0));
+}
+
+void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp");
+
+ // FIXME: volatile
+ CGF.EmitAggExpr(E->getSubExpr(), Val, false);
+ } else
+ Visit(E->getSubExpr());
+
+ CGF.PushCXXTemporary(E->getTemporary(), Val);
+}
+
+void
+AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp");
+ }
+
+ CGF.EmitCXXConstructExpr(Val, E);
+}
+
+void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ CGF.EmitCXXExprWithTemporaries(E, DestPtr, VolatileDest);
+}
+
+void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
+ // FIXME: Ignore result?
+ // FIXME: Are initializers affected by volatile?
+ if (isa<ImplicitValueInitExpr>(E)) {
+ EmitNullInitializationToLValue(LV, E->getType());
+ } else if (E->getType()->isComplexType()) {
+ CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
+ } else if (CGF.hasAggregateLLVMType(E->getType())) {
+ CGF.EmitAnyExpr(E, LV.getAddress(), false);
+ } else {
+ CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, E->getType());
+ }
+}
+
+void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
+ if (!CGF.hasAggregateLLVMType(T)) {
+ // For non-aggregates, we can store zero
+ llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
+ CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
+ } else {
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers are guaranteed to have a
+ // bit pattern of all zeros.
+ // FIXME: That isn't true for member pointers!
+ // There's a potential optimization opportunity in combining
+ // memsets; that would be easy for arrays, but relatively
+ // difficult for structures with the current code.
+ CGF.EmitMemSetToZero(LV.getAddress(), T);
+ }
+}
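+
+// Example (illustrative): zero-initializing 'struct S { int i; float f; }'
+// takes the aggregate path above and emits one memset of sizeof(struct S)
+// bytes, which is correct because the all-zero bit pattern is the null value
+// for every member type handled here (modulo the member-pointer caveat).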
+
+void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+#if 0
+ // FIXME: Disabled while we figure out what to do about
+ // test/CodeGen/bitfield.c
+ //
+ // If we can, prefer a copy from a global; this is a lot less code for long
+ // globals, and it's easier for the current optimizers to analyze.
+ // FIXME: Should we really be doing this? Should we try to avoid cases where
+ // we emit a global with a lot of zeros? Should we try to avoid short
+ // globals?
+ if (E->isConstantInitializer(CGF.getContext(), 0)) {
+ llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, &CGF);
+ llvm::GlobalVariable* GV =
+ new llvm::GlobalVariable(C->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ C, "", &CGF.CGM.getModule(), 0);
+ EmitFinalDestCopy(E, LValue::MakeAddr(GV, 0));
+ return;
+ }
+#endif
+ if (E->hadArrayRangeDesignator()) {
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+ }
+
+ // Handle initialization of an array.
+ if (E->getType()->isArrayType()) {
+ const llvm::PointerType *APType =
+ cast<llvm::PointerType>(DestPtr->getType());
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(APType->getElementType());
+
+ uint64_t NumInitElements = E->getNumInits();
+
+ if (E->getNumInits() > 0) {
+ QualType T1 = E->getType();
+ QualType T2 = E->getInit(0)->getType();
+ if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
+ EmitAggLoadOfLValue(E->getInit(0));
+ return;
+ }
+ }
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
+ ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
+
+ unsigned CVRqualifier = ElementType.getCVRQualifiers();
+
+ for (uint64_t i = 0; i != NumArrayElements; ++i) {
+ llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
+ if (i < NumInitElements)
+ EmitInitializationToLValue(E->getInit(i),
+ LValue::MakeAddr(NextVal, CVRqualifier));
+ else
+ EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, CVRqualifier),
+ ElementType);
+ }
+ return;
+ }
+
+ assert(E->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+  // to the appropriate value.  This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned NumInitElements = E->getNumInits();
+ RecordDecl *SD = E->getType()->getAsRecordType()->getDecl();
+ unsigned CurInitVal = 0;
+
+ if (E->getType()->isUnionType()) {
+ // Only initialize one field of a union. The field itself is
+ // specified by the initializer list.
+ if (!E->getInitializedFieldInUnion()) {
+ // Empty union; we have nothing to do.
+
+#ifndef NDEBUG
+      // Make sure that it's really an empty union and not a failure of
+ // semantic analysis.
+ for (RecordDecl::field_iterator Field = SD->field_begin(CGF.getContext()),
+ FieldEnd = SD->field_end(CGF.getContext());
+ Field != FieldEnd; ++Field)
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return;
+ }
+
+ // FIXME: volatility
+ FieldDecl *Field = E->getInitializedFieldInUnion();
+ LValue FieldLoc = CGF.EmitLValueForField(DestPtr, Field, true, 0);
+
+ if (NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(0), FieldLoc);
+ } else {
+ // Default-initialize to null
+ EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ }
+
+ return;
+ }
+
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ for (RecordDecl::field_iterator Field = SD->field_begin(CGF.getContext()),
+ FieldEnd = SD->field_end(CGF.getContext());
+ Field != FieldEnd; ++Field) {
+ // We're done once we hit the flexible array member
+ if (Field->getType()->isIncompleteArrayType())
+ break;
+
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // FIXME: volatility
+ LValue FieldLoc = CGF.EmitLValueForField(DestPtr, *Field, false, 0);
+    // We never generate write-barriers for initialized fields.
+ LValue::SetObjCNonGC(FieldLoc, true);
+ if (CurInitVal < NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc);
+ } else {
+      // We're out of initializers; default-initialize to null
+ EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ }
+ }
+}
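+
+// Example (illustrative): for
+//
+//   struct S { int a, b, c; };
+//   struct S s = { 1, 2 };
+//
+// the field loop above stores 1 into 'a' and 2 into 'b', then runs out of
+// initializers and calls EmitNullInitializationToLValue to zero 'c'.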
+
+//===----------------------------------------------------------------------===//
+// Entry Points into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitAggExpr - Emit the computation of the specified expression of aggregate
+/// type. The result is computed into DestPtr. Note that if DestPtr is null,
+/// the value of the aggregate expression is not needed. If VolatileDest is
+/// true, DestPtr cannot be 0.
+void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
+ bool VolatileDest, bool IgnoreResult) {
+ assert(E && hasAggregateLLVMType(E->getType()) &&
+ "Invalid aggregate expression to emit");
+  assert((DestPtr != 0 || VolatileDest == false) &&
+         "a volatile aggregate destination cannot be null");
+
+ AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult)
+ .Visit(const_cast<Expr*>(E));
+}
+
+void CodeGenFunction::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ EmitMemSetToZero(DestPtr, Ty);
+}
+
+void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
+ llvm::Value *SrcPtr, QualType Ty,
+ bool isVolatile) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+ // C99 6.5.16.1p3, which states "If the value being stored in an object is
+  // read from another object that overlaps in any way the storage of the first
+ // object, then the overlap shall be exact and the two objects shall have
+ // qualified or unqualified versions of a compatible type."
+ //
+ // memcpy is not defined if the source and destination pointers are exactly
+ // equal, but other compilers do this optimization, and almost every memcpy
+ // implementation handles this case safely. If there is a libc that does not
+ // safely handle this, we can add a target hook.
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+
+ // Get size and alignment info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+ // FIXME: Handle variable sized types.
+ const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
+
+ // FIXME: If we have a volatile struct, the optimizer can remove what might
+ // appear to be `extra' memory ops:
+ //
+ // volatile struct { int i; } a, b;
+ //
+ // int main() {
+ // a = b;
+ // a = b;
+ // }
+ //
+  // we need to use a different call here.  We use isVolatile to indicate when
+ // either the source or the destination is volatile.
+ Builder.CreateCall4(CGM.getMemCpyFn(),
+ DestPtr, SrcPtr,
+ // TypeInfo.first describes size in bits.
+ llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ TypeInfo.second/8));
+}
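+
+// Example (illustrative): for 'struct S a, b; ... a = b;' the code above
+// bitcasts both addresses to i8* and emits roughly
+//
+//   call void @llvm.memcpy(i8* %a, i8* %b, iN sizeof(struct S), i32 align)
+//
+// with size and alignment taken from ASTContext::getTypeInfo, which reports
+// bits (hence the divisions by 8).  The exact intrinsic signature varies by
+// LLVM version; this only sketches the shape of the call.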
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 0000000..41fb725
--- /dev/null
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,663 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Compiler.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
+
+namespace {
+class VISIBILITY_HIDDEN ComplexExprEmitter
+ : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+  // True if we should ignore the value of a
+ bool IgnoreReal;
+ bool IgnoreImag;
+ // True if we should ignore the value of a=b
+ bool IgnoreRealAssign;
+ bool IgnoreImagAssign;
+public:
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
+ bool irn=false, bool iin=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
+ IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreReal() {
+ bool I = IgnoreReal;
+ IgnoreReal = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImag() {
+ bool I = IgnoreImag;
+ IgnoreImag = false;
+ return I;
+ }
+ bool TestAndClearIgnoreRealAssign() {
+ bool I = IgnoreRealAssign;
+ IgnoreRealAssign = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImagAssign() {
+ bool I = IgnoreImagAssign;
+ IgnoreImagAssign = false;
+ return I;
+ }
+
+  /// EmitLoadOfLValue - Given an expression with complex type that
+  /// represents an l-value, this method emits the address of the l-value,
+  /// then loads and returns the result.
+ ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+ }
+
+ /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+ /// the real and imaginary pieces.
+ ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+
+ /// EmitStoreOfComplex - Store the specified real/imag parts into the
+ /// specified value pointer.
+ void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
+
+ /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+ QualType DestType);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ ComplexPairTy VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ assert(0 && "Stmt can't have complex result type!");
+ return ComplexPairTy();
+ }
+ ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+ ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+
+ // l-values.
+ ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // FIXME: CompoundLiteralExpr
+
+ ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
+ ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ // Unlike for scalars, we don't have to worry about function->ptr demotion
+ // here.
+ return EmitCast(E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCastExpr(CastExpr *E) {
+ return EmitCast(E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCallExpr(const CallExpr *E);
+ ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+ // Operators.
+ ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre);
+ ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
+ ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
+ // LNot,Real,Imag never return complex.
+ ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
+ }
+ ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAsComplexType()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+ ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAsComplexType()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+
+ struct BinOpInfo {
+ ComplexPairTy LHS;
+ ComplexPairTy RHS;
+ QualType Ty; // Computation Type.
+ };
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &));
+
+ ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+ ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+ ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+ ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+ return EmitBinAdd(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+ return EmitBinSub(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+ return EmitBinDiv(EmitBinOps(E));
+ }
+
+ // Compound assignments.
+ ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+ }
+ ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+ }
+ ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+ }
+ ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+ }
+
+ // GCC rejects rem/and/or/xor for integer complex.
+ // Logical and/or always return int, never complex.
+
+ // No comparisons produce a complex result.
+ ComplexPairTy VisitBinAssign (const BinaryOperator *E);
+ ComplexPairTy VisitBinComma (const BinaryOperator *E);
+
+
+ ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
+ ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+ ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+ ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to
+/// load the real and imaginary pieces, returning them as Real/Imag.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+ bool isVolatile) {
+ llvm::SmallString<64> Name(SrcPtr->getNameStart(),
+ SrcPtr->getNameStart()+SrcPtr->getNameLen());
+
+ llvm::Value *Real=0, *Imag=0;
+
+ if (!IgnoreReal) {
+ Name += ".realp";
+ llvm::Value *RealPtr = Builder.CreateStructGEP(SrcPtr, 0, Name.c_str());
+
+    Name.pop_back();  // ".realp" -> ".real"
+    Real = Builder.CreateLoad(RealPtr, isVolatile, Name.c_str());
+    Name.resize(Name.size()-4); // ".real" -> "."; "imagp" is appended below
+ }
+
+ if (!IgnoreImag) {
+ Name += "imagp";
+
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(SrcPtr, 1, Name.c_str());
+
+ Name.pop_back(); // .imagp -> .imag
+ Imag = Builder.CreateLoad(ImagPtr, isVolatile, Name.c_str());
+ }
+ return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+ bool isVolatile) {
+ llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
+
+ Builder.CreateStore(Val.first, RealPtr, isVolatile);
+ Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
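+
+// Example (illustrative): a '_Complex double' converts to the LLVM struct
+// '{ double, double }', so loading or storing a complex value is just two
+// scalar operations through GEPs to element 0 (real) and element 1 (imag).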
+
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "complex expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+ return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+ QualType SrcType,
+ QualType DestType) {
+ // Get the src/dest element type.
+ SrcType = SrcType->getAsComplexType()->getElementType();
+ DestType = DestType->getAsComplexType()->getElementType();
+
+ // C99 6.3.1.6: When a value of complex type is converted to another
+ // complex type, both the real and imaginary parts follow the conversion
+ // rules for the corresponding real types.
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+ return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
+ // Two cases here: cast from (complex to complex) and (scalar to complex).
+ if (Op->getType()->isAnyComplexType())
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+
+ // C99 6.3.1.7: When a value of real type is converted to a complex type, the
+ // real part of the complex result value is determined by the rules of
+ // conversion to the corresponding real type and the imaginary part of the
+ // complex result value is a positive zero or an unsigned zero.
+ llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+ // Convert the input element to the element type of the complex.
+ DestTy = DestTy->getAsComplexType()->getElementType();
+ Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ ComplexPairTy InVal = EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+
+ llvm::Value *NextVal;
+ if (isa<llvm::IntegerType>(InVal.first->getType())) {
+ uint64_t AmountVal = isInc ? 1 : -1;
+ NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+ } else {
+ QualType ElemTy = E->getType()->getAsComplexType()->getElementType();
+ llvm::APFloat FVal(CGF.getContext().getFloatTypeSemantics(ElemTy), 1);
+ if (!isInc)
+ FVal.changeSign();
+ NextVal = llvm::ConstantFP::get(FVal);
+ }
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+
+ ComplexPairTy IncVal(NextVal, InVal.second);
+
+ // Store the updated result through the lvalue.
+ EmitStoreOfComplex(IncVal, LV.getAddress(), LV.isVolatileQualified());
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? IncVal : InVal;
+}
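+
+// Example (illustrative): '++z' on a '_Complex double' operand (a GNU
+// extension) adds 1.0 to the real part only; the imaginary part is carried
+// through unchanged, as InVal.second above shows.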
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResR = Builder.CreateNeg(Op.first, "neg.r");
+ llvm::Value *ResI = Builder.CreateNeg(Op.second, "neg.i");
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ // ~(a+ib) = a + i*-b
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResI = Builder.CreateNeg(Op.second, "conj.i");
+ return ComplexPairTy(Op.first, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+ llvm::Value *ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ llvm::Value *ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+ llvm::Value *ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ llvm::Value *ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ return ComplexPairTy(ResR, ResI);
+}
+
+
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
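+  // (a+ib) * (c+id) = (ac - bd) + i(bc + ad)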
+ llvm::Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ llvm::Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+ llvm::Value *ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+ llvm::Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ llvm::Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ llvm::Value *ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+ llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+ llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c
+ llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d
+ llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c
+ llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d
+ llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c
+ llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d
+ llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+ llvm::Value *DSTr, *DSTi;
+ if (Tmp3->getType()->isFloatingPoint()) {
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp");
+ } else {
+ if (Op.Ty->getAsComplexType()->getElementType()->isUnsignedIntegerType()) {
+ DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp");
+ } else {
+ DSTr = Builder.CreateSDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp");
+ }
+ }
+
+ return ComplexPairTy(DSTr, DSTi);
+}
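+
+// Note (illustrative): this is the textbook complex-division formula, which
+// can overflow or underflow prematurely for large or small operands.  A more
+// robust lowering (e.g. Smith's algorithm) would scale the operands first;
+// this emitter does not attempt that.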
+
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ BinOpInfo Ops;
+ Ops.LHS = Visit(E->getLHS());
+ Ops.RHS = Visit(E->getRHS());
+ Ops.Ty = E->getType();
+ return Ops;
+}
+
+
+// Compound assignments.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ bool ignreal = TestAndClearIgnoreRealAssign();
+ bool ignimag = TestAndClearIgnoreImagAssign();
+ QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+
+ BinOpInfo OpInfo;
+
+ // Load the RHS and LHS operands.
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen a little. It is possible for the RHS to be complex or
+ // scalar.
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
+
+ LValue LHSLV = CGF.EmitLValue(E->getLHS());
+
+
+ // We know the LHS is a complex lvalue.
+ OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(),LHSLV.isVolatileQualified());
+ OpInfo.LHS=EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty);
+
+ // Expand the binary operator.
+ ComplexPairTy Result = (this->*Func)(OpInfo);
+
+ // Truncate the result back to the LHS type.
+ Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+
+ // Store the result value into the LHS lvalue.
+ EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
+ // And now return the LHS
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+ return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
+}
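+
+// Example (illustrative): for '_Complex float z; z += 2.0;' the computation
+// type is '_Complex double', so the RHS is cast up by EmitCast, the LHS is
+// loaded and widened, the add runs in '_Complex double', and the result is
+// truncated back to '_Complex float' before being stored and re-loaded as
+// the value of the expression.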
+
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ bool ignreal = TestAndClearIgnoreRealAssign();
+ bool ignimag = TestAndClearIgnoreImagAssign();
+ assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) ==
+ CGF.getContext().getCanonicalType(E->getRHS()->getType()) &&
+ "Invalid assignment");
+ // Emit the RHS.
+ ComplexPairTy Val = Visit(E->getRHS());
+
+ // Compute the address to store into.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Store into it.
+ EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
+ // And now return the LHS
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitStmt(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
+ Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+
+ // Handle the GNU extension for missing LHS.
+ assert(E->getLHS() && "Must have LHS for complex value");
+
+ ComplexPairTy LHS = Visit(E->getLHS());
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+
+ ComplexPairTy RHS = Visit(E->getRHS());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
+ RealPN->reserveOperandSpace(2);
+ RealPN->addIncoming(LHS.first, LHSBlock);
+ RealPN->addIncoming(RHS.first, RHSBlock);
+
+ // Create a PHI node for the imaginary part.
+ llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), "cond.i");
+ ImagPN->reserveOperandSpace(2);
+ ImagPN->addIncoming(LHS.second, LHSBlock);
+ ImagPN->addIncoming(RHS.second, RHSBlock);
+
+ return ComplexPairTy(RealPN, ImagPN);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreReal();
+ (void)Ignore;
+ assert (Ignore == false && "init list ignored");
+ Ignore = TestAndClearIgnoreImag();
+ (void)Ignore;
+ assert (Ignore == false && "init list ignored");
+ if (E->getNumInits())
+ return Visit(E->getInit(0));
+
+  // Empty init list initializes to null
+ QualType Ty = E->getType()->getAsComplexType()->getElementType();
+ const llvm::Type* LTy = CGF.ConvertType(Ty);
+ llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+ return ComplexPairTy(zeroConstant, zeroConstant);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(E, "complex va_arg expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+ // FIXME Volatility.
+ return EmitLoadOfComplex(ArgPtr, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning the result.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+ bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+ IgnoreImagAssign)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+/// of complex type, storing into the specified Value*.
+void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+ ComplexExprEmitter Emitter(*this);
+ ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+ Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile);
+}
+
+/// StoreComplexToAddr - Store a complex number into the specified address.
+void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ ComplexExprEmitter(*this).EmitStoreOfComplex(V, DestAddr, DestIsVolatile);
+}
+
+/// LoadComplexFromAddr - Load a complex number from the specified address.
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+ bool SrcIsVolatile) {
+ return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
+}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 0000000..b30bafb
--- /dev/null
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,588 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class VISIBILITY_HIDDEN ConstExprEmitter :
+ public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+public:
+ ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
+ : CGM(cgm), CGF(cgf) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ llvm::Constant *VisitStmt(Stmt *S) {
+ return 0;
+ }
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+
+ llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return Visit(E->getInitializer());
+ }
+
+ llvm::Constant *VisitCastExpr(CastExpr* E) {
+ // GCC cast to union extension
+ if (E->getType()->isUnionType()) {
+ const llvm::Type *Ty = ConvertType(E->getType());
+ Expr *SubExpr = E->getSubExpr();
+ return EmitUnion(CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF),
+ Ty);
+ }
+ // Explicit and implicit no-op casts
+ QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
+ if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy)) {
+ return Visit(E->getSubExpr());
+ }
+ return 0;
+ }
+
+ llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+
+ llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
+ std::vector<llvm::Constant*> Elts;
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(ConvertType(ILE->getType()));
+ unsigned NumInitElements = ILE->getNumInits();
+ // FIXME: Check for wide strings
+ // FIXME: Check for NumInitElements exactly equal to 1??
+ if (NumInitElements > 0 &&
+ (isa<StringLiteral>(ILE->getInit(0)) ||
+ isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
+ ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
+ return Visit(ILE->getInit(0));
+ const llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+    // Initializing an array requires us to automatically
+    // initialize any elements that have not been initialized explicitly.
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ unsigned i = 0;
+ bool RewriteType = false;
+ for (; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return 0;
+ RewriteType |= (C->getType() != ElemTy);
+ Elts.push_back(C);
+ }
+
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ for (; i < NumElements; ++i)
+ Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+
+ if (RewriteType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<const llvm::Type*> Types;
+ for (unsigned i = 0; i < Elts.size(); ++i)
+ Types.push_back(Elts[i]->getType());
+ const llvm::StructType *SType = llvm::StructType::get(Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+
+ void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts,
+ FieldDecl* Field, Expr* E) {
+ // Calculate the value to insert
+ llvm::Constant *C = CGM.EmitConstantExpr(E, Field->getType(), CGF);
+ if (!C)
+ return;
+
+ llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
+ if (!CI) {
+ CGM.ErrorUnsupported(E, "bitfield initialization");
+ return;
+ }
+ llvm::APInt V = CI->getValue();
+
+ // Calculate information about the relevant field
+ const llvm::Type* Ty = CI->getType();
+ const llvm::TargetData &TD = CGM.getTypes().getTargetData();
+ unsigned size = TD.getTypeAllocSizeInBits(Ty);
+ unsigned fieldOffset = CGM.getTypes().getLLVMFieldNo(Field) * size;
+ CodeGenTypes::BitFieldInfo bitFieldInfo =
+ CGM.getTypes().getBitFieldInfo(Field);
+ fieldOffset += bitFieldInfo.Begin;
+
+ // Find where to start the insertion
+ // FIXME: This is O(n^2) in the number of bit-fields!
+ // FIXME: This won't work if the struct isn't completely packed!
+ unsigned offset = 0, i = 0;
+ while (offset < (fieldOffset & -8))
+ offset += TD.getTypeAllocSizeInBits(Elts[i++]->getType());
+
+ // Advance over 0 sized elements (must terminate in bounds since
+ // the bitfield must have a size).
+ while (TD.getTypeAllocSizeInBits(Elts[i]->getType()) == 0)
+ ++i;
+
+ // Promote the size of V if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (bitFieldInfo.Size > V.getBitWidth())
+ V.zext(bitFieldInfo.Size);
+
+ // Insert the bits into the struct
+    // FIXME: This algorithm is only correct on X86!
+    // FIXME: This algorithm assumes bit-fields only have byte-size elements!
+ unsigned bitsToInsert = bitFieldInfo.Size;
+ unsigned curBits = std::min(8 - (fieldOffset & 7), bitsToInsert);
+ unsigned byte = V.getLoBits(curBits).getZExtValue() << (fieldOffset & 7);
+ do {
+ llvm::Constant* byteC = llvm::ConstantInt::get(llvm::Type::Int8Ty, byte);
+ Elts[i] = llvm::ConstantExpr::getOr(Elts[i], byteC);
+ ++i;
+ V = V.lshr(curBits);
+ bitsToInsert -= curBits;
+
+ if (!bitsToInsert)
+ break;
+
+ curBits = bitsToInsert > 8 ? 8 : bitsToInsert;
+ byte = V.getLoBits(curBits).getZExtValue();
+ } while (true);
+ }
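+
+  // Example (illustrative): for 'struct { int x : 3; int y : 5; } s = {2, 9};'
+  // each initializer is OR'd into the byte array a few bits at a time: 'x'
+  // fills bits 0-2 of byte 0 and 'y' bits 3-7, so byte 0 becomes
+  // (2 | (9 << 3)) == 0x4A under the little-endian x86 layout that the
+  // FIXMEs above note is the only one handled correctly.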
+
+ llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
+ const llvm::StructType *SType =
+ cast<llvm::StructType>(ConvertType(ILE->getType()));
+ RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
+ std::vector<llvm::Constant*> Elts;
+
+ // Initialize the whole structure to zero.
+ // FIXME: This doesn't handle member pointers correctly!
+ for (unsigned i = 0; i < SType->getNumElements(); ++i) {
+ const llvm::Type *FieldTy = SType->getElementType(i);
+ Elts.push_back(llvm::Constant::getNullValue(FieldTy));
+ }
+
+ // Copy initializer elements. Skip padding fields.
+ unsigned EltNo = 0; // Element no in ILE
+ int FieldNo = 0; // Field no in RecordDecl
+ bool RewriteType = false;
+ for (RecordDecl::field_iterator Field = RD->field_begin(CGM.getContext()),
+ FieldEnd = RD->field_end(CGM.getContext());
+ EltNo < ILE->getNumInits() && Field != FieldEnd; ++Field) {
+ FieldNo++;
+ if (!Field->getIdentifier())
+ continue;
+
+ if (Field->isBitField()) {
+ InsertBitfieldIntoStruct(Elts, *Field, ILE->getInit(EltNo));
+ } else {
+ unsigned FieldNo = CGM.getTypes().getLLVMFieldNo(*Field);
+ llvm::Constant *C = CGM.EmitConstantExpr(ILE->getInit(EltNo),
+ Field->getType(), CGF);
+ if (!C) return 0;
+ RewriteType |= (C->getType() != Elts[FieldNo]->getType());
+ Elts[FieldNo] = C;
+ }
+ EltNo++;
+ }
+
+ if (RewriteType) {
+ // FIXME: Make this work for non-packed structs
+ assert(SType->isPacked() && "Cannot recreate unpacked structs");
+ std::vector<const llvm::Type*> Types;
+ for (unsigned i = 0; i < Elts.size(); ++i)
+ Types.push_back(Elts[i]->getType());
+ SType = llvm::StructType::get(Types, true);
+ }
+
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) {
+ if (!C)
+ return 0;
+
+ // Build a struct with the union sub-element as the first member,
+ // and padded to the appropriate size
+ std::vector<llvm::Constant*> Elts;
+ std::vector<const llvm::Type*> Types;
+ Elts.push_back(C);
+ Types.push_back(C->getType());
+ unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
+ while (CurSize < TotalSize) {
+ Elts.push_back(llvm::Constant::getNullValue(llvm::Type::Int8Ty));
+ Types.push_back(llvm::Type::Int8Ty);
+ CurSize++;
+ }
+
+ // This always generates a packed struct
+ // FIXME: Try to generate an unpacked struct when we can
+ llvm::StructType* STy = llvm::StructType::get(Types, true);
+ return llvm::ConstantStruct::get(STy, Elts);
+ }
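+
+  // Example (illustrative): on a target where 'int' is four bytes,
+  // initializing 'union U { char c; int i; }' through 'c' produces the
+  // packed struct '<{ i8, i8, i8, i8 }>': the initializer byte followed by
+  // three zero bytes of padding up to sizeof(U).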
+
+ llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+ const llvm::Type *Ty = ConvertType(ILE->getType());
+
+ FieldDecl* curField = ILE->getInitializedFieldInUnion();
+ if (!curField) {
+ // There's no field to initialize, so value-initialize the union.
+#ifndef NDEBUG
+      // Make sure that it's really an empty union and not a failure of
+ // semantic analysis.
+ RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
+ for (RecordDecl::field_iterator Field = RD->field_begin(CGM.getContext()),
+ FieldEnd = RD->field_end(CGM.getContext());
+ Field != FieldEnd; ++Field)
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return llvm::Constant::getNullValue(Ty);
+ }
+
+ if (curField->isBitField()) {
+ // Create a dummy struct for bit-field insertion
+ unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty);
+ llvm::Constant* NV = llvm::Constant::getNullValue(llvm::Type::Int8Ty);
+ std::vector<llvm::Constant*> Elts(NumElts, NV);
+
+ InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
+ const llvm::ArrayType *RetTy =
+ llvm::ArrayType::get(NV->getType(), NumElts);
+ return llvm::ConstantArray::get(RetTy, Elts);
+ }
+
+ llvm::Constant *InitElem;
+ if (ILE->getNumInits() > 0) {
+ Expr *Init = ILE->getInit(0);
+ InitElem = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ } else {
+ InitElem = CGM.EmitNullConstant(curField->getType());
+ }
+ return EmitUnion(InitElem, Ty);
+ }
+
+ llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
+ const llvm::VectorType *VType =
+ cast<llvm::VectorType>(ConvertType(ILE->getType()));
+ const llvm::Type *ElemTy = VType->getElementType();
+ std::vector<llvm::Constant*> Elts;
+ unsigned NumElements = VType->getNumElements();
+ unsigned NumInitElements = ILE->getNumInits();
+
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ unsigned i = 0;
+ for (; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return 0;
+ Elts.push_back(C);
+ }
+
+ for (; i < NumElements; ++i)
+ Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+
+ return llvm::ConstantVector::get(VType, Elts);
+ }
+
+ llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
+ return CGM.EmitNullConstant(E->getType());
+ }
+
+ llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->getType()->isScalarType()) {
+ // We have a scalar in braces. Just use the first element.
+ if (ILE->getNumInits() > 0) {
+ Expr *Init = ILE->getInit(0);
+ return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ }
+ return CGM.EmitNullConstant(ILE->getType());
+ }
+
+ if (ILE->getType()->isArrayType())
+ return EmitArrayInitialization(ILE);
+
+ if (ILE->getType()->isStructureType())
+ return EmitStructInitialization(ILE);
+
+ if (ILE->getType()->isUnionType())
+ return EmitUnionInitialization(ILE);
+
+ if (ILE->getType()->isVectorType())
+ return EmitVectorInitialization(ILE);
+
+ assert(0 && "Unable to handle InitListExpr");
+ // Get rid of control reaches end of void function warning.
+ // Not reached.
+ return 0;
+ }
+
+ llvm::Constant *VisitStringLiteral(StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // This must be a string initializing an array in a static initializer.
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ return llvm::ConstantArray::get(CGM.GetStringForStringLiteral(E), false);
+ }
+
+ llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ // This must be an @encode initializing an array in a static initializer.
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ std::string Str;
+ CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
+
+ // Resize the string to the right size, adding zeros at the end, or
+ // truncating as needed.
+ Str.resize(CAT->getSize().getZExtValue(), '\0');
+ return llvm::ConstantArray::get(Str, false);
+ }
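+
+  // Example (illustrative): for 'char buf[] = @encode(int);' the encoding
+  // string is "i"; Str.resize above pads it with trailing NULs (or truncates
+  // it) to match the constant array type before it is emitted inline.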
+
+ llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // Utility methods
+ const llvm::Type *ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+ }
+
+public:
+ llvm::Constant *EmitLValue(Expr *E) {
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = Visit(CLE->getInitializer());
+ // FIXME: "Leaked" on failure.
+ if (C)
+ C = new llvm::GlobalVariable(C->getType(),
+ E->getType().isConstQualified(),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", &CGM.getModule());
+ return C;
+ }
+ case Expr::DeclRefExprClass:
+ case Expr::QualifiedDeclRefExprClass: {
+ NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
+ return CGM.GetAddrOfFunction(GlobalDecl(FD));
+ if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
+ // We can never refer to a variable with local storage.
+ if (!VD->hasLocalStorage()) {
+ if (VD->isFileVarDecl() || VD->hasExternalStorage())
+ return CGM.GetAddrOfGlobalVar(VD);
+ else if (VD->isBlockVarDecl()) {
+ assert(CGF && "Can't access static local vars without CGF");
+ return CGF->GetAddrOfStaticLocalVar(VD);
+ }
+ }
+ }
+ break;
+ }
+ case Expr::StringLiteralClass:
+ return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
+ case Expr::ObjCStringLiteralClass: {
+ ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
+ llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ }
+ case Expr::PredefinedExprClass: {
+ // __func__/__FUNCTION__ -> "". __PRETTY_FUNCTION__ -> "top level".
+ std::string Str;
+ if (cast<PredefinedExpr>(E)->getIdentType() ==
+ PredefinedExpr::PrettyFunction)
+ Str = "top level";
+
+ return CGM.GetAddrOfConstantCString(Str, ".tmp");
+ }
+ case Expr::AddrLabelExprClass: {
+ assert(CGF && "Invalid address of label expression outside function.");
+ unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id);
+ return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType()));
+ }
+ case Expr::CallExprClass: {
+ CallExpr* CE = cast<CallExpr>(E);
+ if (CE->isBuiltinCall(CGM.getContext()) !=
+ Builtin::BI__builtin___CFStringMakeConstantString)
+ break;
+ const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
+ const StringLiteral *Literal = cast<StringLiteral>(Arg);
+ // FIXME: need to deal with UCN conversion issues.
+ return CGM.GetAddrOfConstantCFString(Literal);
+ }
+ case Expr::BlockExprClass: {
+ std::string FunctionName;
+ if (CGF)
+ FunctionName = CGF->CurFn->getName();
+ else
+ FunctionName = "global";
+
+ return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ }
+ }
+
+ return 0;
+ }
+};
+
+} // end anonymous namespace.
+
+llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ Expr::EvalResult Result;
+
+ bool Success = false;
+
+ if (DestType->isReferenceType())
+ Success = E->EvaluateAsLValue(Result, Context);
+ else
+ Success = E->Evaluate(Result, Context);
+
+ if (Success) {
+ assert(!Result.HasSideEffects &&
+ "Constant expr should not have any side effects!");
+ switch (Result.Val.getKind()) {
+ case APValue::Uninitialized:
+ assert(0 && "Constant expressions should be initialized.");
+ return 0;
+ case APValue::LValue: {
+ const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(llvm::Type::Int64Ty,
+ Result.Val.getLValueOffset());
+
+ llvm::Constant *C;
+ if (const Expr *LVBase = Result.Val.getLValueBase()) {
+ C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
+
+ // Apply offset if necessary.
+ if (!Offset->isNullValue()) {
+ const llvm::Type *Type =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
+ C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
+ }
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+        // If the types don't match, this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
+ return C;
+ }
+ }
+ case APValue::Int: {
+ llvm::Constant *C = llvm::ConstantInt::get(Result.Val.getInt());
+
+ if (C->getType() == llvm::Type::Int1Ty) {
+ const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+ }
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(Result.Val.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(Result.Val.getComplexIntImag());
+
+ return llvm::ConstantStruct::get(Complex, 2);
+ }
+ case APValue::Float:
+ return llvm::ConstantFP::get(Result.Val.getFloat());
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(Result.Val.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(Result.Val.getComplexFloatImag());
+
+ return llvm::ConstantStruct::get(Complex, 2);
+ }
+ case APValue::Vector: {
+ llvm::SmallVector<llvm::Constant *, 4> Inits;
+ unsigned NumElts = Result.Val.getVectorLength();
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ APValue &Elt = Result.Val.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(Elt.getInt()));
+ else
+ Inits.push_back(llvm::ConstantFP::get(Elt.getFloat()));
+ }
+ return llvm::ConstantVector::get(&Inits[0], Inits.size());
+ }
+ }
+ }
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType() == llvm::Type::Int1Ty) {
+ const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
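+
+// An illustrative sketch (commentary only, not part of the build): for a
+// file-scope initializer such as
+//
+//   int arr[4];
+//   int *p = &arr[2];
+//
+// Evaluate() yields an LValue whose base is 'arr' and whose byte offset is 8;
+// the offset is applied through the i8* GEP above, giving roughly (names and
+// exact spelling are illustrative):
+//
+//   @p = global i32* bitcast (i8* getelementptr (i8* bitcast ([4 x i32]* @arr
+//          to i8*), i64 8) to i32*)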
+
+llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ // Always return an LLVM null constant for now; this will change when we
+ // get support for IRGen of member pointers.
+ return llvm::Constant::getNullValue(getTypes().ConvertType(T));
+}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
new file mode 100644
index 0000000..950e9e5
--- /dev/null
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -0,0 +1,1575 @@
+//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::Value;
+
+//===----------------------------------------------------------------------===//
+// Scalar Expression Emitter
+//===----------------------------------------------------------------------===//
+
+struct BinOpInfo {
+ Value *LHS;
+ Value *RHS;
+ QualType Ty; // Computation Type.
+ const BinaryOperator *E;
+};
+
+namespace {
+class VISIBILITY_HIDDEN ScalarExprEmitter
+ : public StmtVisitor<ScalarExprEmitter, Value*> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ bool IgnoreResultAssign;
+
+public:
+
+ ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+  bool TestAndClearIgnoreResultAssign() {
+    bool I = IgnoreResultAssign;
+    IgnoreResultAssign = false;
+    return I;
+  }
+
+ const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
+
+ Value *EmitLoadOfLValue(LValue LV, QualType T) {
+ return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
+ }
+
+  /// EmitLoadOfLValue - Given an expression that represents a value l-value,
+  /// this method emits the address of the l-value, then loads and returns the
+  /// result.
+ Value *EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(EmitLValue(E), E->getType());
+ }
+
+ /// EmitConversionToBool - Convert the specified expression value to a
+ /// boolean (i1) truth value. This is equivalent to "Val != 0".
+ Value *EmitConversionToBool(Value *Src, QualType DstTy);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination
+ /// type is an LLVM scalar type.
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ Value *VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ assert(0 && "Stmt can't have complex result type!");
+ return 0;
+ }
+ Value *VisitExpr(Expr *S);
+ Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+
+ // Leaves.
+ Value *VisitIntegerLiteral(const IntegerLiteral *E) {
+ return llvm::ConstantInt::get(E->getValue());
+ }
+ Value *VisitFloatingLiteral(const FloatingLiteral *E) {
+ return llvm::ConstantFP::get(E->getValue());
+ }
+ Value *VisitCharacterLiteral(const CharacterLiteral *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ Value *VisitGNUNullExpr(const GNUNullExpr *E) {
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),
+ CGF.getContext().typesAreCompatible(
+ E->getArgType1(), E->getArgType2()));
+ }
+ Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
+ Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+ llvm::Value *V =
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ CGF.GetIDForAddrOfLabel(E->getLabel()));
+
+ return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
+ }
+
+ // l-values.
+ Value *VisitDeclRefExpr(DeclRefExpr *E) {
+ if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl()))
+ return llvm::ConstantInt::get(EC->getInitVal());
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return CGF.EmitObjCSelectorExpr(E);
+ }
+ Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return CGF.EmitObjCProtocolExpr(E);
+ }
+ Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getScalarVal();
+ }
+
+ Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ Value *VisitMemberExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitStringLiteral(Expr *E) { return EmitLValue(E).getAddress(); }
+ Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
+ return EmitLValue(E).getAddress();
+ }
+
+ Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); }
+
+ Value *VisitInitListExpr(InitListExpr *E) {
+    bool Ignore = TestAndClearIgnoreResultAssign();
+    (void)Ignore;
+    assert(!Ignore && "init list ignored");
+ unsigned NumInitElements = E->getNumInits();
+
+ if (E->hadArrayRangeDesignator()) {
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+ }
+
+ const llvm::VectorType *VType =
+ dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
+
+ // We have a scalar in braces. Just use the first element.
+ if (!VType)
+ return Visit(E->getInit(0));
+
+ unsigned NumVectorElements = VType->getNumElements();
+ const llvm::Type *ElementType = VType->getElementType();
+
+ // Emit individual vector element stores.
+ llvm::Value *V = llvm::UndefValue::get(VType);
+
+ // Emit initializers
+ unsigned i;
+ for (i = 0; i < NumInitElements; ++i) {
+ Value *NewV = Visit(E->getInit(i));
+ Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ V = Builder.CreateInsertElement(V, NewV, Idx);
+ }
+
+ // Emit remaining default initializers
+ for (/* Do not initialize i*/; i < NumVectorElements; ++i) {
+ Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ llvm::Value *NewV = llvm::Constant::getNullValue(ElementType);
+ V = Builder.CreateInsertElement(V, NewV, Idx);
+ }
+
+ return V;
+ }
+
+ Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ Value *VisitImplicitCastExpr(const ImplicitCastExpr *E);
+ Value *VisitCastExpr(const CastExpr *E) {
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (E->getType()->isVariablyModifiedType())
+ CGF.EmitVLASize(E->getType());
+
+ return EmitCastExpr(E->getSubExpr(), E->getType());
+ }
+ Value *EmitCastExpr(const Expr *E, QualType T);
+
+ Value *VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getScalarVal();
+ }
+
+ Value *VisitStmtExpr(const StmtExpr *E);
+
+ Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
+
+ // Unary Operators.
+ Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
+ Value *VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ Value *VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ Value *VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ Value *VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+ return EmitLValue(E->getSubExpr()).getAddress();
+ }
+ Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitUnaryPlus(const UnaryOperator *E) {
+    // This differs from gcc, most likely due to a bug in gcc.
+ TestAndClearIgnoreResultAssign();
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryMinus (const UnaryOperator *E);
+ Value *VisitUnaryNot (const UnaryOperator *E);
+ Value *VisitUnaryLNot (const UnaryOperator *E);
+ Value *VisitUnaryReal (const UnaryOperator *E);
+ Value *VisitUnaryImag (const UnaryOperator *E);
+ Value *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryOffsetOf(const UnaryOperator *E);
+
+ // C++
+ Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ Value *VisitCXXThisExpr(CXXThisExpr *TE) {
+ return CGF.LoadCXXThis();
+ }
+
+ Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
+ }
+ Value *VisitCXXNewExpr(const CXXNewExpr *E) {
+ return CGF.EmitCXXNewExpr(E);
+ }
+
+ // Binary Operators.
+ Value *EmitMul(const BinOpInfo &Ops) {
+ if (CGF.getContext().getLangOptions().OverflowChecking
+ && Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ }
+ /// Create a binary op that checks for overflow.
+ /// Currently only supports +, - and *.
+ Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+ Value *EmitDiv(const BinOpInfo &Ops);
+ Value *EmitRem(const BinOpInfo &Ops);
+ Value *EmitAdd(const BinOpInfo &Ops);
+ Value *EmitSub(const BinOpInfo &Ops);
+ Value *EmitShl(const BinOpInfo &Ops);
+ Value *EmitShr(const BinOpInfo &Ops);
+ Value *EmitAnd(const BinOpInfo &Ops) {
+ return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
+ }
+ Value *EmitXor(const BinOpInfo &Ops) {
+ return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
+ }
+ Value *EmitOr (const BinOpInfo &Ops) {
+ return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
+ }
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ Value *EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+
+ // Binary operators and binary compound assignment operators.
+#define HANDLEBINOP(OP) \
+ Value *VisitBin ## OP(const BinaryOperator *E) { \
+ return Emit ## OP(EmitBinOps(E)); \
+ } \
+ Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
+ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
+ }
+ HANDLEBINOP(Mul);
+ HANDLEBINOP(Div);
+ HANDLEBINOP(Rem);
+ HANDLEBINOP(Add);
+ HANDLEBINOP(Sub);
+ HANDLEBINOP(Shl);
+ HANDLEBINOP(Shr);
+ HANDLEBINOP(And);
+ HANDLEBINOP(Xor);
+ HANDLEBINOP(Or);
+#undef HANDLEBINOP
+
+ // Comparisons.
+ Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc);
+#define VISITCOMP(CODE, UI, SI, FP) \
+ Value *VisitBin##CODE(const BinaryOperator *E) { \
+ return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
+ llvm::FCmpInst::FP); }
+ VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT);
+ VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT);
+ VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE);
+ VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE);
+ VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ);
+ VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE);
+#undef VISITCOMP
+
+ Value *VisitBinAssign (const BinaryOperator *E);
+
+ Value *VisitBinLAnd (const BinaryOperator *E);
+ Value *VisitBinLOr (const BinaryOperator *E);
+ Value *VisitBinComma (const BinaryOperator *E);
+
+ // Other Operators.
+ Value *VisitBlockExpr(const BlockExpr *BE);
+ Value *VisitConditionalOperator(const ConditionalOperator *CO);
+ Value *VisitChooseExpr(ChooseExpr *CE);
+ Value *VisitVAArgExpr(VAArgExpr *VE);
+ Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+ return CGF.EmitObjCStringLiteral(E);
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitConversionToBool - Convert the specified expression value to a
+/// boolean (i1) truth value. This is equivalent to "Val != 0".
+Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
+ assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs");
+
+ if (SrcType->isRealFloatingType()) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+ return Builder.CreateFCmpUNE(Src, Zero, "tobool");
+ }
+
+ assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
+ "Unknown scalar type to convert");
+
+ // Because of the type rules of C, we often end up computing a logical value,
+ // then zero extending it to int, then wanting it as a logical value again.
+ // Optimize this common case.
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
+ if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ // Compare against an integer or pointer null.
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+ return Builder.CreateICmpNE(Src, Zero, "tobool");
+}
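+
+// An illustrative sketch (commentary only): for a floating-point condition
+// such as 'double d; if (d) ...', the code above emits an *unordered*
+// comparison so that NaN converts to true, roughly:
+//
+//   %tobool = fcmp une double %d, 0.000000e+00
+//
+// Integers and pointers instead compare 'icmp ne' against zero/null.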
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType) {
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ if (DstType->isVoidType()) return 0;
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstType->isBooleanType())
+ return EmitConversionToBool(Src, SrcType);
+
+ const llvm::Type *DstTy = ConvertType(DstType);
+
+ // Ignore conversions like int -> uint.
+ if (Src->getType() == DstTy)
+ return Src;
+
+ // Handle pointer conversions next: pointers can only be converted
+ // to/from other pointers and integers. Check for pointer types in
+ // terms of LLVM, as some native types (like Obj-C id) may map to a
+ // pointer type.
+ if (isa<llvm::PointerType>(DstTy)) {
+ // The source value may be an integer, or a pointer.
+ if (isa<llvm::PointerType>(Src->getType()))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+ assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ const llvm::Type *MiddleTy = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ bool InputSigned = SrcType->isSignedIntegerType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+ // Then, cast to pointer.
+ return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
+ }
+
+ if (isa<llvm::PointerType>(Src->getType())) {
+    // Must be a ptr-to-int cast.
+ assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
+ return Builder.CreatePtrToInt(Src, DstTy, "conv");
+ }
+
+ // A scalar can be splatted to an extended vector of the same element type
+ if (DstType->isExtVectorType() && !isa<VectorType>(SrcType)) {
+ // Cast the scalar to element type
+ QualType EltTy = DstType->getAsExtVectorType()->getElementType();
+ llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+
+ // Splat the element across to all elements
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ for (unsigned i = 0; i < NumElements; i++)
+ Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+
+ // Allow bitcast from vector to integer/fp of the same size.
+ if (isa<llvm::VectorType>(Src->getType()) ||
+ isa<llvm::VectorType>(DstTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ // Finally, we have the arithmetic types: real int/float.
+ if (isa<llvm::IntegerType>(Src->getType())) {
+ bool InputSigned = SrcType->isSignedIntegerType();
+ if (isa<llvm::IntegerType>(DstTy))
+ return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ return Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ return Builder.CreateUIToFP(Src, DstTy, "conv");
+ }
+
+ assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
+ if (isa<llvm::IntegerType>(DstTy)) {
+ if (DstType->isSignedIntegerType())
+ return Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ return Builder.CreateFPToUI(Src, DstTy, "conv");
+ }
+
+ assert(DstTy->isFloatingPoint() && "Unknown real conversion");
+ if (DstTy->getTypeID() < Src->getType()->getTypeID())
+ return Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ return Builder.CreateFPExt(Src, DstTy, "conv");
+}
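+
+// An illustrative sketch (commentary only): the splat path above lowers a
+// scalar-to-ext-vector conversion, e.g. a float broadcast into a float4, to
+// an insert into lane zero of undef plus a shuffle with an all-zero mask:
+//
+//   %tmp   = insertelement <4 x float> undef, float %f, i32 0
+//   %splat = shufflevector <4 x float> %tmp, <4 x float> %tmp,
+//                          <4 x i32> zeroinitializer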
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified
+/// complex type to the specified destination type, where the destination
+/// type is an LLVM scalar type.
+Value *ScalarExprEmitter::
+EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy) {
+ // Get the source element type.
+ SrcTy = SrcTy->getAsComplexType()->getElementType();
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstTy->isBooleanType()) {
+ // Complex != 0 -> (Real != 0) | (Imag != 0)
+ Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
+ Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
+ return Builder.CreateOr(Src.first, Src.second, "tobool");
+ }
+
+ // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
+ // the imaginary part of the complex value is discarded and the value of the
+ // real part is converted according to the conversion rules for the
+  // corresponding real type."
+ return EmitScalarConversion(Src.first, SrcTy, DstTy);
+}
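+
+// An illustrative sketch (commentary only): converting '_Complex double z'
+// to bool tests both components, as described above:
+//
+//   %cmp.r  = fcmp une double %z.real, 0.0
+//   %cmp.i  = fcmp une double %z.imag, 0.0
+//   %tobool = or i1 %cmp.r, %cmp.i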
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "scalar expression");
+ if (E->getType()->isVoidType())
+ return 0;
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ llvm::SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
+ indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
+ }
+ Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
+ Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+ Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
+ return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+}
+
+Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+  // Emit subscript expressions in rvalue contexts. For most cases, this just
+ // loads the lvalue formed by the subscript expr. However, we have to be
+ // careful, because the base of a vector subscript is occasionally an rvalue,
+ // so we can't get it as an lvalue.
+ if (!E->getBase()->getType()->isVectorType())
+ return EmitLoadOfLValue(E);
+
+ // Handle the vector case. The base must be a vector, the index must be an
+ // integer value.
+ Value *Base = Visit(E->getBase());
+ Value *Idx = Visit(E->getIdx());
+ bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
+ Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned,
+ "vecidxcast");
+ return Builder.CreateExtractElement(Base, Idx, "vecext");
+}
+
+/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but
+/// also handle things like function to pointer-to-function decay, and array to
+/// pointer decay.
+Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) {
+ const Expr *Op = E->getSubExpr();
+
+ // If this is due to array->pointer conversion, emit the array expression as
+ // an l-value.
+ if (Op->getType()->isArrayType()) {
+ Value *V = EmitLValue(Op).getAddress(); // Bitfields can't be arrays.
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!Op->getType()->isVariableArrayType()) {
+ assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
+ assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
+ ->getElementType()) &&
+ "Expected pointer to array");
+ V = Builder.CreateStructGEP(V, 0, "arraydecay");
+ }
+
+ // The resultant pointer type can be implicitly casted to other pointer
+ // types as well (e.g. void*) and can be implicitly converted to integer.
+ const llvm::Type *DestTy = ConvertType(E->getType());
+ if (V->getType() != DestTy) {
+ if (isa<llvm::PointerType>(DestTy))
+ V = Builder.CreateBitCast(V, DestTy, "ptrconv");
+ else {
+ assert(isa<llvm::IntegerType>(DestTy) && "Unknown array decay");
+ V = Builder.CreatePtrToInt(V, DestTy, "ptrconv");
+ }
+ }
+ return V;
+ }
+
+ return EmitCastExpr(Op, E->getType());
+}
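+
+// An illustrative sketch (commentary only): for 'int a[10]; int *p = a;' the
+// decay above emits a GEP to the first element (value names illustrative):
+//
+//   %arraydecay = getelementptr [10 x i32]* %a, i32 0, i32 0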
+
+
+// EmitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
+// have to handle a broader range of conversions than explicit casts, as they
+// handle things like function to ptr-to-function decay etc.
+Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) {
+ if (!DestTy->isVoidType())
+ TestAndClearIgnoreResultAssign();
+
+  // Handle cases where the source is a non-complex type.
+
+ if (!CGF.hasAggregateLLVMType(E->getType())) {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // Use EmitScalarConversion to perform the conversion.
+ return EmitScalarConversion(Src, E->getType(), DestTy);
+ }
+
+ if (E->getType()->isAnyComplexType()) {
+ // Handle cases where the source is a complex type.
+ bool IgnoreImag = true;
+ bool IgnoreImagAssign = true;
+ bool IgnoreReal = IgnoreResultAssign;
+ bool IgnoreRealAssign = IgnoreResultAssign;
+ if (DestTy->isBooleanType())
+ IgnoreImagAssign = IgnoreImag = false;
+ else if (DestTy->isVoidType()) {
+ IgnoreReal = IgnoreImag = false;
+ IgnoreRealAssign = IgnoreImagAssign = true;
+ }
+ CodeGenFunction::ComplexPairTy V
+ = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+ IgnoreImagAssign);
+ return EmitComplexToScalarConversion(V, E->getType(), DestTy);
+ }
+
+ // Okay, this is a cast from an aggregate. It must be a cast to void. Just
+ // evaluate the result and return.
+ CGF.EmitAggExpr(E, 0, false, true);
+ return 0;
+}
+
+Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(),
+ !E->getType()->isVoidType()).getScalarVal();
+}
+
+Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+ return Builder.CreateLoad(CGF.GetAddrOfBlockDecl(E), false, "tmp");
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Operators
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ QualType ValTy = E->getSubExpr()->getType();
+ Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal();
+
+ int AmountVal = isInc ? 1 : -1;
+
+ if (ValTy->isPointerType() &&
+ ValTy->getAsPointerType()->isVariableArrayType()) {
+ // The amount of the addition/subtraction needs to account for the VLA size
+ CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
+ }
+
+ Value *NextVal;
+ if (const llvm::PointerType *PT =
+ dyn_cast<llvm::PointerType>(InVal->getType())) {
+ llvm::Constant *Inc =llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal);
+ if (!isa<llvm::FunctionType>(PT->getElementType())) {
+ NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
+ } else {
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
+ NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
+ NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ }
+ } else if (InVal->getType() == llvm::Type::Int1Ty && isInc) {
+    // Bool++ is an interesting case; due to promotion rules, we get:
+ // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
+ // Bool = ((int)Bool+1) != 0
+ // An interesting aspect of this is that increment is always true.
+ // Decrement does not have this property.
+ NextVal = llvm::ConstantInt::getTrue();
+ } else {
+    // Add the inc/dec amount to the value.
+ if (isa<llvm::IntegerType>(InVal->getType()))
+ NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
+ else if (InVal->getType() == llvm::Type::FloatTy)
+ NextVal =
+ llvm::ConstantFP::get(llvm::APFloat(static_cast<float>(AmountVal)));
+ else if (InVal->getType() == llvm::Type::DoubleTy)
+ NextVal =
+ llvm::ConstantFP::get(llvm::APFloat(static_cast<double>(AmountVal)));
+ else {
+ llvm::APFloat F(static_cast<float>(AmountVal));
+ bool ignored;
+ F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ NextVal = llvm::ConstantFP::get(F);
+ }
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitfield())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy,
+ &NextVal);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? NextVal : InVal;
+}
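+
+// An illustrative sketch (commentary only): the Bool++ special case above
+// means the increment needs no arithmetic at all:
+//
+//   _Bool b = 0;
+//   b++;   // b is now 1 (true), unconditionally
+//
+// so the stored value is simply the constant true, while b-- must still be
+// computed.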
+
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNeg(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNot(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+ // Compare operand to zero.
+ Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
+
+ // Invert value.
+ // TODO: Could dynamically modify easy computations here. For example, if
+ // the operand is an icmp ne, turn into icmp eq.
+ BoolVal = Builder.CreateNot(BoolVal, "lnot");
+
+ // ZExt result to the expr type.
+ return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
+}
+
+/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
+/// argument of the sizeof expression as an integer.
+Value *
+ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
+ QualType TypeToSize = E->getTypeOfArgument();
+ if (E->isSizeOf()) {
+ if (const VariableArrayType *VAT =
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ if (E->isArgumentType()) {
+ // sizeof(type) - make sure to emit the VLA size.
+ CGF.EmitVLASize(TypeToSize);
+ } else {
+ // C99 6.5.3.4p2: If the argument is an expression of type
+ // VLA, it is evaluated.
+ CGF.EmitAnyExpr(E->getArgumentExpr());
+ }
+
+ return CGF.GetVLASize(VAT);
+ }
+ }
+
+ // If this isn't sizeof(vla), the result must be constant; use the
+ // constant folding logic so we don't have to duplicate it here.
+ Expr::EvalResult Result;
+ E->Evaluate(Result, CGF.getContext());
+ return llvm::ConstantInt::get(Result.Val.getInt());
+}
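+
+// An illustrative sketch (commentary only): per C99 6.5.3.4p2, only the VLA
+// case is evaluated at run time:
+//
+//   int n = 8;
+//   int a[n];
+//   sizeof(a);    // loads the saved VLA size at run time
+//   sizeof(int);  // constant-folded via Evaluate() (typically 4)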
+
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType())
+ return CGF.EmitComplexExpr(Op, false, true, false, true).first;
+ return Visit(Op);
+}
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType())
+ return CGF.EmitComplexExpr(Op, true, false, true, false).second;
+
+ // __imag on a scalar returns zero. Emit the subexpr to ensure side
+ // effects are evaluated, but not the actual value.
+ if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) {
+ Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
+ const llvm::Type* ResultType = ConvertType(E->getType());
+ return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Operators
+//===----------------------------------------------------------------------===//
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ BinOpInfo Result;
+ Result.LHS = Visit(E->getLHS());
+ Result.RHS = Visit(E->getRHS());
+ Result.Ty = E->getType();
+ Result.E = E;
+ return Result;
+}
+
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+
+ BinOpInfo OpInfo;
+
+ if (E->getComputationResultType()->isAnyComplexType()) {
+ // This needs to go through the complex expression emitter, but
+ // it's a tad complicated to do that... I'm leaving it out for now.
+ // (Note that we do actually need the imaginary part of the RHS for
+ // multiplication and division.)
+ CGF.ErrorUnsupported(E, "complex compound assignment");
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ }
+
+ // Emit the RHS first. __block variables need to have the rhs evaluated
+ // first, plus this should improve codegen a little.
+ OpInfo.RHS = Visit(E->getRHS());
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.E = E;
+ // Load/convert the LHS.
+ LValue LHSLV = EmitLValue(E->getLHS());
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType());
+
+ // Expand the binary operator.
+ Value *Result = (this->*Func)(OpInfo);
+
+ // Convert the result back to the LHS type.
+ Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ // Store the result value into the LHS lvalue. Bit-fields are
+ // handled specially because the result is altered by the store,
+ // i.e., [C99 6.5.16p1] 'An assignment expression has the value of
+ // the left operand after the assignment...'.
+ if (LHSLV.isBitfield()) {
+ if (!LHSLV.isVolatileQualified()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
+ &Result);
+ return Result;
+ } else
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
+ } else
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+ if (Ignore)
+ return 0;
+ return EmitLoadOfLValue(LHSLV, E->getType());
+}
+
+
+Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ if (Ops.LHS->getType()->isFPOrFPVector())
+ return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ else if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
+ else
+ return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
+}
+
+Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
+ // Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
+ else
+ return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
+}
+
+Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
+ unsigned IID;
+ unsigned OpID = 0;
+
+ switch (Ops.E->getOpcode()) {
+ case BinaryOperator::Add:
+ case BinaryOperator::AddAssign:
+ OpID = 1;
+ IID = llvm::Intrinsic::sadd_with_overflow;
+ break;
+ case BinaryOperator::Sub:
+ case BinaryOperator::SubAssign:
+ OpID = 2;
+ IID = llvm::Intrinsic::ssub_with_overflow;
+ break;
+ case BinaryOperator::Mul:
+ case BinaryOperator::MulAssign:
+ OpID = 3;
+ IID = llvm::Intrinsic::smul_with_overflow;
+ break;
+ default:
+ assert(false && "Unsupported operation for overflow detection");
+ IID = 0;
+ }
+ OpID <<= 1;
+ OpID |= 1;
+
+ const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
+
+ Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
+ Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
+ Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+
+ // Branch in case of overflow.
+ llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *overflowBB =
+ CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::BasicBlock *continueBB =
+ CGF.createBasicBlock("overflow.continue", CGF.CurFn);
+
+ Builder.CreateCondBr(overflow, overflowBB, continueBB);
+
+ // Handle overflow
+
+ Builder.SetInsertPoint(overflowBB);
+
+  // The handler has the signature:
+  //   long long (*__overflow_handler)(long long a, long long b, char op,
+  //                                   char width)
+  std::vector<const llvm::Type*> handlerArgTypes;
+  handlerArgTypes.push_back(llvm::Type::Int64Ty);
+  handlerArgTypes.push_back(llvm::Type::Int64Ty);
+  handlerArgTypes.push_back(llvm::Type::Int8Ty);
+  handlerArgTypes.push_back(llvm::Type::Int8Ty);
+  llvm::FunctionType *handlerTy = llvm::FunctionType::get(llvm::Type::Int64Ty,
+                                                          handlerArgTypes,
+                                                          false);
+ llvm::Value *handlerFunction =
+ CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
+ llvm::PointerType::getUnqual(handlerTy));
+ handlerFunction = Builder.CreateLoad(handlerFunction);
+
+ llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
+ Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty),
+ Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty),
+ llvm::ConstantInt::get(llvm::Type::Int8Ty, OpID),
+ llvm::ConstantInt::get(llvm::Type::Int8Ty,
+ cast<llvm::IntegerType>(opTy)->getBitWidth()));
+
+ handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+
+ Builder.CreateBr(continueBB);
+
+ // Set up the continuation
+ Builder.SetInsertPoint(continueBB);
+ // Get the correct result
+ llvm::PHINode *phi = Builder.CreatePHI(opTy);
+ phi->reserveOperandSpace(2);
+ phi->addIncoming(result, initialBB);
+ phi->addIncoming(handlerResult, overflowBB);
+
+ return phi;
+}
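+
+// An illustrative sketch (commentary only): the handler is found at run time
+// through the '__overflow_handler' global loaded above. A hypothetical
+// user-supplied handler (purely an assumption, not part of clang or any
+// runtime it ships) could look like:
+//
+//   #include <stdio.h>
+//   #include <stdlib.h>
+//   static long long die(long long a, long long b, char op, char width) {
+//     fprintf(stderr, "signed overflow: op=%d width=%d\n", op, width);
+//     abort();
+//   }
+//   long long (*__overflow_handler)(long long, long long, char, char) = die;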
+
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
+ if (!Ops.Ty->isPointerType()) {
+ if (CGF.getContext().getLangOptions().OverflowChecking
+ && Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+ return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
+ }
+
+ if (Ops.Ty->getAsPointerType()->isVariableArrayType()) {
+ // The amount of the addition needs to account for the VLA size
+ CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
+ }
+ Value *Ptr, *Idx;
+ Expr *IdxExp;
+ const PointerType *PT;
+ if ((PT = Ops.E->getLHS()->getType()->getAsPointerType())) {
+ Ptr = Ops.LHS;
+ Idx = Ops.RHS;
+ IdxExp = Ops.E->getRHS();
+ } else { // int + pointer
+ PT = Ops.E->getRHS()->getType()->getAsPointerType();
+ assert(PT && "Invalid add expr");
+ Ptr = Ops.RHS;
+ Idx = Ops.LHS;
+ IdxExp = Ops.E->getLHS();
+ }
+
+ unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (Width < CGF.LLVMPointerWidth) {
+ // Zero or sign extend the pointer value based on whether the index is
+ // signed or not.
+ const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ if (IdxExp->getType()->isSignedIntegerType())
+ Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+ else
+ Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+ }
+
+ const QualType ElementType = PT->getPointeeType();
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ CGF.getContext().getTypeSize(OIT) / 8);
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+ Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ptr->getType());
+ }
+
+ // Explicitly handle GNU void* and function pointer arithmetic
+ // extensions. The GNU void* casts amount to no-ops since our void*
+ // type is i8*, but this is future proof.
+ if (ElementType->isVoidType() || ElementType->isFunctionType()) {
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+ Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ptr->getType());
+ }
+
+ return Builder.CreateGEP(Ptr, Idx, "add.ptr");
+}
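+
+// An illustrative sketch (commentary only): for the GNU 'void* + int'
+// extension handled above, 'void *p; p + n' becomes byte arithmetic on i8*:
+//
+//   %add.ptr = getelementptr i8* %p, i64 %n
+//
+// whereas 'int *q; q + n' GEPs in units of i32 elements.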
+
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
+ if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
+ if (CGF.getContext().getLangOptions().OverflowChecking
+ && Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+ return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ }
+
+ if (Ops.E->getLHS()->getType()->getAsPointerType()->isVariableArrayType()) {
+    // For ptr-int, the amount of the subtraction needs to account for the
+    // VLA size; for ptr-ptr, the amount of the division does.
+ CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
+ }
+
+ const QualType LHSType = Ops.E->getLHS()->getType();
+ const QualType LHSElementType = LHSType->getAsPointerType()->getPointeeType();
+ if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
+ // pointer - int
+ Value *Idx = Ops.RHS;
+ unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (Width < CGF.LLVMPointerWidth) {
+ // Zero or sign extend the pointer value based on whether the index is
+ // signed or not.
+ const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ if (Ops.E->getRHS()->getType()->isSignedIntegerType())
+ Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+ else
+ Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+ }
+ Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
+
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ if (const ObjCInterfaceType *OIT =
+ dyn_cast<ObjCInterfaceType>(LHSElementType)) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ CGF.getContext().getTypeSize(OIT) / 8);
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+ Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ops.LHS->getType());
+ }
+
+ // Explicitly handle GNU void* and function pointer arithmetic
+ // extensions. The GNU void* casts amount to no-ops since our
+ // void* type is i8*, but this is future proof.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+ Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
+ return Builder.CreateBitCast(Res, Ops.LHS->getType());
+ }
+
+ return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
+ } else {
+ // pointer - pointer
+ Value *LHS = Ops.LHS;
+ Value *RHS = Ops.RHS;
+
+ uint64_t ElementSize;
+
+ // Handle GCC extension for pointer arithmetic on void* and function pointer
+ // types.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+ ElementSize = 1;
+ } else {
+ ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
+ }
+
+ const llvm::Type *ResultType = ConvertType(Ops.Ty);
+ LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+ // Optimize out the shift for element size of 1.
+ if (ElementSize == 1)
+ return BytesBetween;
+
+    // HACK: LLVM doesn't have a divide instruction that 'knows' there is no
+ // remainder. As such, we handle common power-of-two cases here to generate
+ // better code. See PR2247.
+ if (llvm::isPowerOf2_64(ElementSize)) {
+ Value *ShAmt =
+ llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize));
+ return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
+ }
+
+ // Otherwise, do a full sdiv.
+ Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
+ return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+ }
+}
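+
+// An illustrative sketch (commentary only): for 'int *p, *q; p - q' on a
+// 64-bit target, the power-of-two path above yields a shift, not a division:
+//
+//   %sub.ptr.lhs.cast = ptrtoint i32* %p to i64
+//   %sub.ptr.rhs.cast = ptrtoint i32* %q to i64
+//   %sub.ptr.sub      = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
+//   %sub.ptr.shr      = ashr i64 %sub.ptr.sub, 2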
+
+Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ return Builder.CreateShl(Ops.LHS, RHS, "shl");
+}
+
+Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateLShr(Ops.LHS, RHS, "shr");
+ return Builder.CreateAShr(Ops.LHS, RHS, "shr");
+}
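+
+// An illustrative sketch (commentary only): the RHS promotion above means
+// that for 'long x; int s; x << s' the shift amount is widened first:
+//
+//   %sh_prom = zext i32 %s to i64
+//   %shl     = shl i64 %x, %sh_prom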
+
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc) {
+ TestAndClearIgnoreResultAssign();
+ Value *Result;
+ QualType LHSTy = E->getLHS()->getType();
+ if (!LHSTy->isAnyComplexType() && !LHSTy->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+ if (LHS->getType()->isFloatingPoint()) {
+ Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+ LHS, RHS, "cmp");
+ } else if (LHSTy->isSignedIntegerType()) {
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
+ LHS, RHS, "cmp");
+ } else {
+ // Unsigned integers and pointers.
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS, RHS, "cmp");
+ }
+ } else if (LHSTy->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+ if (LHS->getType()->isFPOrFPVector()) {
+ Result = Builder.CreateVFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+ LHS, RHS, "cmp");
+ } else if (LHSTy->isUnsignedIntegerType()) {
+ Result = Builder.CreateVICmp((llvm::CmpInst::Predicate)UICmpOpc,
+ LHS, RHS, "cmp");
+ } else {
+ // Signed integers and pointers.
+ Result = Builder.CreateVICmp((llvm::CmpInst::Predicate)SICmpOpc,
+ LHS, RHS, "cmp");
+ }
+ return Result;
+ } else {
+ // Complex Comparison: can only be an equality comparison.
+ CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
+ CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
+
+ QualType CETy = LHSTy->getAsComplexType()->getElementType();
+
+ Value *ResultR, *ResultI;
+ if (CETy->isRealFloatingType()) {
+ ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ } else {
+ // Complex comparisons can only be equality comparisons. As such, signed
+ // and unsigned opcodes are the same.
+ ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ }
+
+ if (E->getOpcode() == BinaryOperator::EQ) {
+ Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
+ } else {
+ assert(E->getOpcode() == BinaryOperator::NE &&
+ "Complex comparison other than == or != ?");
+ Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
+ }
+ }
+
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen just a little.
+ Value *RHS = Visit(E->getRHS());
+ LValue LHS = EmitLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitfield()) {
+ if (!LHS.isVolatileQualified()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
+ &RHS);
+ return RHS;
+ } else
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
+ } else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+ if (Ignore)
+ return 0;
+ return EmitLoadOfLValue(LHS, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+ // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
+ // If we have 1 && X, just emit X without inserting the control flow.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+ if (Cond == 1) { // If we have 1 && X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int.
+ return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "land.ext");
+ }
+
+ // 0 && RHS: If it is safe, just elide the RHS, and return 0.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::Constant::getNullValue(CGF.LLVMIntTy);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+
+ // Branch on the LHS first. If it is false, go to the failure (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
+
+  // Any edges into the ContBlock so far come from the (indeterminate number
+  // of) short-circuit checks in the first condition; all of those values will
+  // be false. Start setting up the PHI node in the ContBlock for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
+ PN->reserveOperandSpace(2); // Normal case, two inputs.
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getFalse(), *PI);
+
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext");
+}
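+
+// An illustrative sketch (commentary only): 'a && b' produces the CFG shape
+// built above (block and value names are illustrative):
+//
+//   entry:    br i1 %a.tobool, label %land.rhs, label %land.end
+//   land.rhs: %b.tobool = ...
+//             br label %land.end
+//   land.end: %phi = phi i1 [ false, %entry ], [ %b.tobool, %land.rhs ]
+//             %land.ext = zext i1 %phi to i32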
+
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+ // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
+ // If we have 0 || X, just emit X without inserting the control flow.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+ if (Cond == -1) { // If we have 0 || X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int.
+ return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "lor.ext");
+ }
+
+ // 1 || RHS: If it is safe, just elide the RHS, and return 1.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::ConstantInt::get(CGF.LLVMIntTy, 1);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+
+ // Branch on the LHS first. If it is true, go to the success (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
+
+  // Any edges into the ContBlock so far come from the (indeterminate number
+  // of) short-circuit checks in the first condition; all of those values will
+  // be true. Start setting up the PHI node in the ContBlock for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
+ PN->reserveOperandSpace(2); // Normal case, two inputs.
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getTrue(), *PI);
+
+ // Emit the RHS condition as a bool value.
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitStmt(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Operators
+//===----------------------------------------------------------------------===//
+
+/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
+/// expression is cheap enough and side-effect-free enough to evaluate
+/// unconditionally instead of conditionally. This is used to convert control
+/// flow into selects in some cases.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr());
+
+ // TODO: Allow anything we can constant fold to an integer or fp constant.
+ if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
+ isa<FloatingLiteral>(E))
+ return true;
+
+ // Non-volatile automatic variables too, to get "cond ? X : Y" where
+ // X and Y are local variables.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified())
+ return true;
+
+ return false;
+}
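+
+// For example, given "c ? x : y" where x and y are non-volatile locals, both
+// arms pass the check above and the conditional below is emitted as a single
+// select rather than control flow, roughly (names illustrative):
+//
+//     %tobool = icmp ne i32 %c, 0
+//     %cond = select i1 %tobool, i32 %x, i32 %y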
+
+
+Value *ScalarExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
+ Expr *Live = E->getLHS(), *Dead = E->getRHS();
+ if (Cond == -1)
+ std::swap(Live, Dead);
+
+ // If the dead side doesn't have labels we need, and if the Live side isn't
+    // the GNU missing ?: extension (which we could handle, but don't bother
+ // to), just emit the Live part.
+ if ((!Dead || !CGF.ContainsLabel(Dead)) && // No labels in dead part
+ Live) // Live part isn't missing.
+ return Visit(Live);
+ }
+
+
+ // If this is a really simple expression (like x ? 4 : 5), emit this as a
+ // select instead of as control flow. We can only do this if it is cheap and
+ // safe to evaluate the LHS and RHS unconditionally.
+ if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS()) &&
+ isCheapEnoughToEvaluateUnconditionally(E->getRHS())) {
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
+ llvm::Value *LHS = Visit(E->getLHS());
+ llvm::Value *RHS = Visit(E->getRHS());
+ return Builder.CreateSelect(CondV, LHS, RHS, "cond");
+ }
+
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+ Value *CondVal = 0;
+
+ // If we don't have the GNU missing condition extension, emit a branch on
+ // bool the normal way.
+ if (E->getLHS()) {
+    // Use EmitBranchOnBoolExpr to get small and simple code for the branch
+    // on bool.
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+ } else {
+ // Otherwise, for the ?: extension, evaluate the conditional and then
+ // convert it to bool the hard way. We do this explicitly because we need
+ // the unconverted value for the missing middle value of the ?:.
+ CondVal = CGF.EmitScalarExpr(E->getCond());
+
+ // In some cases, EmitScalarConversion will delete the "CondVal" expression
+ // if there are no extra uses (an optimization). Inhibit this by making an
+ // extra dead use, because we're going to add a use of CondVal later. We
+ // don't use the builder for this, because we don't want it to get optimized
+ // away. This leaves dead code, but the ?: extension isn't common.
+ new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
+ Builder.GetInsertBlock());
+
+ Value *CondBoolVal =
+ CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
+ CGF.getContext().BoolTy);
+ Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
+ }
+
+ CGF.EmitBlock(LHSBlock);
+
+ // Handle the GNU extension for missing LHS.
+ Value *LHS;
+ if (E->getLHS())
+ LHS = Visit(E->getLHS());
+ else // Perform promotions, to handle cases like "short ?: int"
+ LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+
+ Value *RHS = Visit(E->getRHS());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+
+ if (!LHS || !RHS) {
+ assert(E->getType()->isVoidType() && "Non-void value should have a value");
+ return 0;
+ }
+
+  // Create a PHI node for the result.
+ llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
+ PN->reserveOperandSpace(2);
+ PN->addIncoming(LHS, LHSBlock);
+ PN->addIncoming(RHS, RHSBlock);
+ return PN;
+}
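+
+// Illustrative sketch of the control-flow path above for "c ? f() : g()",
+// which is too expensive for a select (block and value names illustrative):
+//
+//     br i1 %c, label %cond.true, label %cond.false
+//   cond.true:
+//     %0 = call i32 @f()
+//     br label %cond.end
+//   cond.false:
+//     %1 = call i32 @g()
+//     br label %cond.end
+//   cond.end:
+//     %cond = phi i32 [ %0, %cond.true ], [ %1, %cond.false ]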
+
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ // If EmitVAArg fails, we fall back to the LLVM instruction.
+ if (!ArgPtr)
+ return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
+
+ // FIXME Volatility.
+ return Builder.CreateLoad(ArgPtr);
+}
+
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
+ return CGF.BuildBlockLiteralTmp(BE);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitScalarExpr - Emit the computation of the specified expression of
+/// scalar type, returning the result as an LLVM value.
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
+ assert(E && !hasAggregateLLVMType(E->getType()) &&
+ "Invalid scalar expression to emit");
+
+ return ScalarExprEmitter(*this, IgnoreResultAssign)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+ QualType DstTy) {
+ assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
+ "Invalid scalar expression to emit");
+ return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified
+/// complex type to the specified destination type, where the destination
+/// type is an LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+ QualType SrcTy,
+ QualType DstTy) {
+ assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
+ "Invalid complex -> scalar conversion");
+ return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
+ DstTy);
+}
+
+Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
+ assert(V1->getType() == V2->getType() &&
+ "Vector operands must be of the same type");
+ unsigned NumElements =
+ cast<llvm::VectorType>(V1->getType())->getNumElements();
+
+ va_list va;
+ va_start(va, V2);
+
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+ for (unsigned i = 0; i < NumElements; i++) {
+ int n = va_arg(va, int);
+ assert(n >= 0 && n < (int)NumElements * 2 &&
+ "Vector shuffle index out of bounds!");
+ Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n));
+ }
+
+ const char *Name = va_arg(va, const char *);
+ va_end(va);
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+
+ return Builder.CreateShuffleVector(V1, V2, Mask, Name);
+}
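+
+// Hypothetical usage sketch: interleave the low halves of two <4 x float>
+// values. The variadic indices select from the concatenation of V1 and V2
+// (so 0..3 name V1's lanes and 4..7 name V2's), and the trailing string
+// names the result:
+//
+//   llvm::Value *Lo = EmitShuffleVector(V1, V2, 0, 4, 1, 5, "interleave.lo");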
+
+llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
+ unsigned NumVals, bool isSplat) {
+ llvm::Value *Vec
+ = llvm::UndefValue::get(llvm::VectorType::get(Vals[0]->getType(), NumVals));
+
+ for (unsigned i = 0, e = NumVals; i != e; ++i) {
+ llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
+ }
+
+ return Vec;
+}
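+
+// Illustrative result for EmitVector with isSplat = true, NumVals = 4, and a
+// single float value %v (a sketch; names are illustrative):
+//
+//     %0 = insertelement <4 x float> undef, float %v, i32 0
+//     %1 = insertelement <4 x float> %0, float %v, i32 1
+//     ...and so on for the remaining lanes.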
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
new file mode 100644
index 0000000..51f9a76
--- /dev/null
+++ b/lib/CodeGen/CGObjC.cpp
@@ -0,0 +1,644 @@
+//===----- CGObjC.cpp - Emit LLVM Code for Objective-C --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Objective-C code as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Emits an instance of NSConstantString representing the object.
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+{
+ llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(E);
+ // FIXME: This bitcast should just be made an invariant on the Runtime.
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+}
+
+/// Emit a selector.
+llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
+ // Untyped selector.
+ // Note that this implementation allows for non-constant strings to be passed
+ // as arguments to @selector(). Currently, the only thing preventing this
+ // behaviour is the type checking in the front end.
+ return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
+ // FIXME: This should pass the Decl not the name.
+ return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
+}
+
+
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
+ // Only the lookup mechanism and first two arguments of the method
+ // implementation vary between runtimes. We can get the receiver and
+ // arguments in generic code.
+
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ const Expr *ReceiverExpr = E->getReceiver();
+ bool isSuperMessage = false;
+ bool isClassMessage = false;
+ // Find the receiver
+ llvm::Value *Receiver;
+ if (!ReceiverExpr) {
+ const ObjCInterfaceDecl *OID = E->getClassInfo().first;
+
+ // Very special case, super send in class method. The receiver is
+ // self (the class object) and the send uses super semantics.
+ if (!OID) {
+ assert(E->getClassName()->isStr("super") &&
+ "Unexpected missing class interface in message send.");
+ isSuperMessage = true;
+ Receiver = LoadObjCSelf();
+ } else {
+ Receiver = Runtime.GetClass(Builder, OID);
+ }
+
+ isClassMessage = true;
+ } else if (isa<ObjCSuperExpr>(E->getReceiver())) {
+ isSuperMessage = true;
+ Receiver = LoadObjCSelf();
+ } else {
+ Receiver = EmitScalarExpr(E->getReceiver());
+ }
+
+ CallArgList Args;
+ EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
+
+ if (isSuperMessage) {
+ // super is only valid in an Objective-C method
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ return Runtime.GenerateMessageSendSuper(*this, E->getType(),
+ E->getSelector(),
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args);
+ }
+ return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
+ Receiver, isClassMessage, Args,
+ E->getMethodDecl());
+}
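+
+// Illustrative sketch: for "[obj doSomething:x]" the generic code above
+// collects the receiver and arguments, and the runtime implementation (GNU
+// or Mac) lowers the send to its own dispatch, conceptually:
+//
+//   imp = <runtime-specific lookup>(receiver, selector);
+//   result = imp(receiver, selector, x);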
+
+/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
+/// the LLVM function and sets the other context used by
+/// CodeGenFunction.
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ FunctionArgList Args;
+ llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
+
+ const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
+ CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
+
+ Args.push_back(std::make_pair(OMD->getSelfDecl(),
+ OMD->getSelfDecl()->getType()));
+ Args.push_back(std::make_pair(OMD->getCmdDecl(),
+ OMD->getCmdDecl()->getType()));
+
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI)
+ Args.push_back(std::make_pair(*PI, (*PI)->getType()));
+
+ StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocEnd());
+}
+
+/// Generate an Objective-C method. An Objective-C method is a C function with
+/// its pointer, name, and types registered in the class structure.
+void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
+ // Check if we should generate debug info for this method.
+ if (CGM.getDebugInfo() && !OMD->hasAttr<NodebugAttr>())
+ DebugInfo = CGM.getDebugInfo();
+ StartObjCMethod(OMD, OMD->getClassInterface());
+ EmitStmt(OMD->getBody(getContext()));
+ FinishFunction(OMD->getBodyRBrace(getContext()));
+}
+
+// FIXME: I wasn't sure about the synthesis approach. If we end up generating an
+// AST for the whole body we can just fall back to having a GenerateFunction
+// which takes the body Stmt.
+
+/// GenerateObjCGetter - Generate an Objective-C property getter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
+ assert(OMD && "Invalid call to generate getter (empty method)");
+  // FIXME: This is rather murky; we create the implicit params here since
+  // Sema will not have created them for us.
+ OMD->createImplicitParams(getContext(), IMP->getClassInterface());
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ // Determine if we should use an objc_getProperty call for
+ // this. Non-atomic properties are directly evaluated.
+ // atomic 'copy' and 'retain' properties are also directly
+ // evaluated in gc-only mode.
+ if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (PD->getSetterKind() == ObjCPropertyDecl::Copy ||
+ PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+ llvm::Value *GetPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
+
+ if (!GetPropertyFn) {
+ CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
+ FinishFunction();
+ return;
+ }
+
+ // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ CodeGenTypes &Types = CGM.getTypes();
+ ValueDecl *Cmd = OMD->getCmdDecl();
+ llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+ QualType IdTy = getContext().getObjCIdType();
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+ Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ // FIXME: We shouldn't need to get the function info here, the
+ // runtime already should have computed it to build the function.
+ RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args),
+ GetPropertyFn, Args);
+ // We need to fix the type here. Ivars with copy & retain are
+ // always objects so we don't need to worry about complex or
+ // aggregates.
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
+ } else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
+ if (hasAggregateLLVMType(Ivar->getType())) {
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ }
+ else {
+ CodeGenTypes &Types = CGM.getTypes();
+ RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
+ }
+ }
+
+ FinishFunction();
+}
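+
+// Rough shape of the synthesized atomic retain/copy getter above, written
+// as C-ish pseudocode (the offset argument and cast are illustrative):
+//
+//   - (T)prop {
+//     return (T) objc_getProperty((id) self, _cmd, <ivar offset>, true);
+//   }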
+
+/// GenerateObjCSetter - Generate an Objective-C property setter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
+ assert(OMD && "Invalid call to generate setter (empty method)");
+  // FIXME: This is rather murky; we create the implicit params here since
+  // Sema will not have created them for us.
+ OMD->createImplicitParams(getContext(), IMP->getClassInterface());
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
+ bool IsAtomic =
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
+
+ // Determine if we should use an objc_setProperty call for
+ // this. Properties with 'copy' semantics always use it, as do
+ // non-atomic properties with 'release' semantics as long as we are
+ // not in gc-only mode.
+ if (IsCopy ||
+ (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+ PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+ llvm::Value *SetPropertyFn =
+ CGM.getObjCRuntime().GetPropertySetFunction();
+
+ if (!SetPropertyFn) {
+ CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
+ FinishFunction();
+ return;
+ }
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
+ // <is-atomic>, <is-copy>).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ CodeGenTypes &Types = CGM.getTypes();
+ ValueDecl *Cmd = OMD->getCmdDecl();
+ llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+ QualType IdTy = getContext().getObjCIdType();
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsId =
+ Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
+ Types.ConvertType(IdTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+ Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+ Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
+ getContext().BoolTy));
+ Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
+ getContext().BoolTy));
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args),
+ SetPropertyFn, Args);
+ } else {
+ SourceLocation Loc = PD->getLocation();
+ ValueDecl *Self = OMD->getSelfDecl();
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ DeclRefExpr Base(Self, Self->getType(), Loc);
+ ParmVarDecl *ArgDecl = *OMD->param_begin();
+ DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc);
+ ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base,
+ true, true);
+ BinaryOperator Assign(&IvarRef, &Arg, BinaryOperator::Assign,
+ Ivar->getType(), Loc);
+ EmitStmt(&Assign);
+ }
+
+ FinishFunction();
+}
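+
+// Rough shape of the synthesized setter above when objc_setProperty is used,
+// as C-ish pseudocode (flag values come from IsAtomic and IsCopy):
+//
+//   - (void)setProp:(T)arg {
+//     objc_setProperty((id) self, _cmd, <ivar offset>, (id) arg,
+//                      /*atomic=*/IsAtomic, /*copy=*/IsCopy);
+//   }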
+
+llvm::Value *CodeGenFunction::LoadObjCSelf() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ // See if we need to lazily forward self inside a block literal.
+ BlockForwardSelf();
+ return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
+}
+
+QualType CodeGenFunction::TypeOfSelfObject() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
+ const PointerType *PTy =
+ cast<PointerType>(getContext().getCanonicalType(selfDecl->getType()));
+ return PTy->getPointeeType();
+}
+
+RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
+ const Selector &S) {
+ llvm::Value *Receiver = LoadObjCSelf();
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isClassMessage = OMD->isClassMethod();
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ Exp->getType(),
+ S,
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ CallArgList());
+}
+
+RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
+ // FIXME: Split it into two separate routines.
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+ Selector S = E->getProperty()->getGetterName();
+ if (isa<ObjCSuperExpr>(E->getBase()))
+ return EmitObjCSuperPropertyGet(E, S);
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Exp->getType(), S,
+ EmitScalarExpr(E->getBase()),
+ false, CallArgList());
+ }
+ else {
+ const ObjCKVCRefExpr *KE = cast<ObjCKVCRefExpr>(Exp);
+ Selector S = KE->getGetterMethod()->getSelector();
+ llvm::Value *Receiver;
+ if (KE->getClassProp()) {
+ const ObjCInterfaceDecl *OID = KE->getClassProp();
+ Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+ }
+ else if (isa<ObjCSuperExpr>(KE->getBase()))
+ return EmitObjCSuperPropertyGet(KE, S);
+ else
+ Receiver = EmitScalarExpr(KE->getBase());
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Exp->getType(), S,
+ Receiver,
+ KE->getClassProp() != 0, CallArgList());
+ }
+}
+
+void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
+ const Selector &S,
+ RValue Src) {
+ CallArgList Args;
+ llvm::Value *Receiver = LoadObjCSelf();
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isClassMessage = OMD->isClassMethod();
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ Args.push_back(std::make_pair(Src, Exp->getType()));
+ CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ Exp->getType(),
+ S,
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args);
+ return;
+}
+
+void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
+ RValue Src) {
+ // FIXME: Split it into two separate routines.
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+ Selector S = E->getProperty()->getSetterName();
+ if (isa<ObjCSuperExpr>(E->getBase())) {
+ EmitObjCSuperPropertySet(E, S, Src);
+ return;
+ }
+ CallArgList Args;
+ Args.push_back(std::make_pair(Src, E->getType()));
+ CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ EmitScalarExpr(E->getBase()),
+ false, Args);
+ }
+ else if (const ObjCKVCRefExpr *E = dyn_cast<ObjCKVCRefExpr>(Exp)) {
+ Selector S = E->getSetterMethod()->getSelector();
+ CallArgList Args;
+ llvm::Value *Receiver;
+ if (E->getClassProp()) {
+ const ObjCInterfaceDecl *OID = E->getClassProp();
+ Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+ }
+ else if (isa<ObjCSuperExpr>(E->getBase())) {
+ EmitObjCSuperPropertySet(E, S, Src);
+ return;
+ }
+ else
+ Receiver = EmitScalarExpr(E->getBase());
+ Args.push_back(std::make_pair(Src, E->getType()));
+ CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ Receiver,
+ E->getClassProp() != 0, Args);
+ }
+ else
+    assert(0 && "bad expression node in EmitObjCPropertySet");
+}
+
+void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
+ llvm::Constant *EnumerationMutationFn =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
+ llvm::Value *DeclAddress;
+ QualType ElementTy;
+
+ if (!EnumerationMutationFn) {
+ CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
+ return;
+ }
+
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ EmitStmt(SD);
+ assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
+ const Decl* D = SD->getSingleDecl();
+ ElementTy = cast<ValueDecl>(D)->getType();
+ DeclAddress = LocalDeclMap[D];
+ } else {
+ ElementTy = cast<Expr>(S.getElement())->getType();
+ DeclAddress = 0;
+ }
+
+ // Fast enumeration state.
+ QualType StateTy = getContext().getObjCFastEnumerationStateType();
+ llvm::AllocaInst *StatePtr = CreateTempAlloca(ConvertType(StateTy),
+ "state.ptr");
+ StatePtr->setAlignment(getContext().getTypeAlign(StateTy) >> 3);
+ EmitMemSetToZero(StatePtr, StateTy);
+
+ // Number of elements in the items array.
+ static const unsigned NumItems = 16;
+
+ // Get selector
+ llvm::SmallVector<IdentifierInfo*, 3> II;
+ II.push_back(&CGM.getContext().Idents.get("countByEnumeratingWithState"));
+ II.push_back(&CGM.getContext().Idents.get("objects"));
+ II.push_back(&CGM.getContext().Idents.get("count"));
+ Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(),
+ &II[0]);
+
+ QualType ItemsTy =
+ getContext().getConstantArrayType(getContext().getObjCIdType(),
+ llvm::APInt(32, NumItems),
+ ArrayType::Normal, 0);
+ llvm::Value *ItemsPtr = CreateTempAlloca(ConvertType(ItemsTy), "items.ptr");
+
+ llvm::Value *Collection = EmitScalarExpr(S.getCollection());
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(StatePtr),
+ getContext().getPointerType(StateTy)));
+
+ Args.push_back(std::make_pair(RValue::get(ItemsPtr),
+ getContext().getPointerType(ItemsTy)));
+
+ const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
+ Args.push_back(std::make_pair(RValue::get(Count),
+ getContext().UnsignedLongTy));
+
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this,
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, false, Args);
+
+ llvm::Value *LimitPtr = CreateTempAlloca(UnsignedLongLTy, "limit.ptr");
+ Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+
+ llvm::BasicBlock *NoElements = createBasicBlock("noelements");
+ llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
+
+ llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
+ llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+
+ llvm::Value *IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+ Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
+
+ EmitBlock(SetStartMutations);
+
+ llvm::Value *StartMutationsPtr =
+ CreateTempAlloca(UnsignedLongLTy);
+
+ llvm::Value *StateMutationsPtrPtr =
+ Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+ "mutationsptr");
+
+ llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
+ "mutations");
+
+ Builder.CreateStore(StateMutations, StartMutationsPtr);
+
+ llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
+ EmitBlock(LoopStart);
+
+ llvm::Value *CounterPtr = CreateTempAlloca(UnsignedLongLTy, "counter.ptr");
+ Builder.CreateStore(Zero, CounterPtr);
+
+ llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
+ EmitBlock(LoopBody);
+
+ StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+ StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+
+ llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
+ "mutations");
+ llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
+ StartMutations,
+ "tobool");
+
+
+ llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
+ llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
+
+ Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
+
+ EmitBlock(WasMutated);
+ llvm::Value *V =
+ Builder.CreateBitCast(Collection,
+ ConvertType(getContext().getObjCIdType()),
+ "tmp");
+ CallArgList Args2;
+ Args2.push_back(std::make_pair(RValue::get(V),
+ getContext().getObjCIdType()));
+ // FIXME: We shouldn't need to get the function info here, the runtime already
+ // should have computed it to build the function.
+ EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2),
+ EnumerationMutationFn, Args2);
+
+ EmitBlock(WasNotMutated);
+
+ llvm::Value *StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+
+ llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
+
+ llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
+ "stateitems");
+
+ llvm::Value *CurrentItemPtr =
+ Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
+
+ llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
+
+ // Cast the item to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem,
+ ConvertType(ElementTy), "tmp");
+
+ if (!DeclAddress) {
+ LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+    // Store the current item into the element lvalue.
+ Builder.CreateStore(CurrentItem, LV.getAddress());
+ } else
+ Builder.CreateStore(CurrentItem, DeclAddress);
+
+ // Increment the counter.
+ Counter = Builder.CreateAdd(Counter,
+ llvm::ConstantInt::get(UnsignedLongLTy, 1));
+ Builder.CreateStore(Counter, CounterPtr);
+
+ llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
+ llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
+
+ BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
+
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(AfterBody);
+
+ llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
+
+ Counter = Builder.CreateLoad(CounterPtr);
+ Limit = Builder.CreateLoad(LimitPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, Limit, "isless");
+ Builder.CreateCondBr(IsLess, LoopBody, FetchMore);
+
+ // Fetch more elements.
+ EmitBlock(FetchMore);
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this,
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, false, Args);
+ Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+ Limit = Builder.CreateLoad(LimitPtr);
+
+ IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+ Builder.CreateCondBr(IsZero, NoElements, LoopStart);
+
+ // No more elements.
+ EmitBlock(NoElements);
+
+ if (!DeclAddress) {
+ // If the element was not a declaration, set it to be null.
+
+ LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+ // Set the value to null.
+ Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
+ LV.getAddress());
+ }
+
+ EmitBlock(LoopEnd);
+}
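+
+// Illustrative skeleton of the loop emitted above for
+// "for (x in collection) body" (labels match the blocks created; details
+// such as the mutation check are simplified):
+//
+//   count = [collection countByEnumeratingWithState:&state
+//                                           objects:items count:16];
+//   if (count == 0) goto noelements;
+//   loopstart:  counter = 0;
+//   loopbody:   if (*state.mutationsPtr changed) call the mutation handler;
+//               x = state.itemsPtr[counter++]; body;
+//   afterbody:  if (counter < count) goto loopbody;
+//   fetchmore:  count = [collection countByEnumeratingWithState:...];
+//               if (count != 0) goto loopstart;
+//   noelements: if the element was an expression, store nil into it;
+//   loopend: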
+
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
+{
+ CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
+{
+ CGM.getObjCRuntime().EmitThrowStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtSynchronizedStmt(
+ const ObjCAtSynchronizedStmt &S)
+{
+ CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+}
+
+CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
new file mode 100644
index 0000000..5e7eec9
--- /dev/null
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -0,0 +1,1582 @@
+//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the GNU runtime. The
+// class in this file generates structures used by the GNU Objective-C runtime
+// library. These structures are defined in objc/objc.h and objc/objc-api.h in
+// the GNU runtime distribution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+
+#include <map>
+
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::dyn_cast;
+
+// The version of the runtime that this class targets. Must match the version
+// in the runtime.
+static const int RuntimeVersion = 8;
+static const int NonFragileRuntimeVersion = 9;
+static const int ProtocolVersion = 2;
+
+namespace {
+class CGObjCGNU : public CodeGen::CGObjCRuntime {
+private:
+ CodeGen::CodeGenModule &CGM;
+ llvm::Module &TheModule;
+ const llvm::PointerType *SelectorTy;
+ const llvm::PointerType *PtrToInt8Ty;
+ const llvm::FunctionType *IMPTy;
+ const llvm::PointerType *IdTy;
+ const llvm::IntegerType *IntTy;
+ const llvm::PointerType *PtrTy;
+ const llvm::IntegerType *LongTy;
+ const llvm::PointerType *PtrToIntTy;
+ llvm::GlobalAlias *ClassPtrAlias;
+ llvm::GlobalAlias *MetaClassPtrAlias;
+ std::vector<llvm::Constant*> Classes;
+ std::vector<llvm::Constant*> Categories;
+ std::vector<llvm::Constant*> ConstantStrings;
+ llvm::Function *LoadFunction;
+ llvm::StringMap<llvm::Constant*> ExistingProtocols;
+ typedef std::pair<std::string, std::string> TypedSelector;
+ std::map<TypedSelector, llvm::GlobalAlias*> TypedSelectors;
+ llvm::StringMap<llvm::GlobalAlias*> UntypedSelectors;
+ // Some zeros used for GEPs in lots of places.
+ llvm::Constant *Zeros[2];
+ llvm::Constant *NULLPtr;
+private:
+ llvm::Constant *GenerateIvarList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets);
+ llvm::Constant *GenerateMethodList(const std::string &ClassName,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ bool isClassMethodList);
+ llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ llvm::Constant *GenerateProtocolList(
+ const llvm::SmallVectorImpl<std::string> &Protocols);
+ llvm::Constant *GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols);
+ llvm::Constant *GenerateProtocolMethodList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes);
+ llvm::Constant *MakeConstantString(const std::string &Str, const std::string
+ &Name="");
+ llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V, const std::string &Name="");
+ llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V, const std::string &Name="");
+ llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+public:
+ CGObjCGNU(CodeGen::CodeGenModule &cgm);
+ virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *);
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs);
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD);
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+ virtual llvm::Function *ModuleInitFunction();
+ virtual llvm::Function *GetPropertyGetFunction();
+ virtual llvm::Function *GetPropertySetFunction();
+ virtual llvm::Function *EnumerationMutationFunction();
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+} // end anonymous namespace
+
+static std::string SymbolNameForClass(const std::string &ClassName) {
+ return "___objc_class_name_" + ClassName;
+}
+
+static std::string SymbolNameForMethod(const std::string &ClassName, const
+ std::string &CategoryName, const std::string &MethodName, bool isClassMethod)
+{
+ return "._objc_method_" + ClassName +"("+CategoryName+")"+
+ (isClassMethod ? "+" : "-") + MethodName;
+}
+
+CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
+ : CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
+ MetaClassPtrAlias(0) {
+ IntTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ LongTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy));
+
+ Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
+ Zeros[1] = Zeros[0];
+ NULLPtr = llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty));
+ // C string type. Used in lots of places.
+ PtrToInt8Ty =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ // Get the selector Type.
+ SelectorTy = cast<llvm::PointerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCSelType()));
+
+ PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
+ PtrTy = PtrToInt8Ty;
+
+ // Object type
+ IdTy = cast<llvm::PointerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCIdType()));
+
+ // IMP type
+ std::vector<const llvm::Type*> IMPArgs;
+ IMPArgs.push_back(IdTy);
+ IMPArgs.push_back(SelectorTy);
+ IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);
+}
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) {
+ llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString());
+ ClassName = Builder.CreateStructGEP(ClassName, 0);
+
+ std::vector<const llvm::Type*> Params(1, PtrToInt8Ty);
+ llvm::Constant *ClassLookupFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy,
+ Params,
+ true),
+ "objc_lookup_class");
+ return Builder.CreateCall(ClassLookupFn, ClassName);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+ llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
+ if (US == 0)
+ US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
+ llvm::GlobalValue::InternalLinkage,
+ ".objc_untyped_selector_alias",
+ NULL, &TheModule);
+
+ return Builder.CreateLoad(US);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+
+ std::string SelName = Method->getSelector().getAsString();
+ std::string SelTypes;
+ CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+ // Typed selectors
+ TypedSelector Selector = TypedSelector(SelName,
+ SelTypes);
+
+ // If it's already cached, return it.
+ if (TypedSelectors[Selector])
+ {
+ return Builder.CreateLoad(TypedSelectors[Selector]);
+ }
+
+ // If it isn't, cache it.
+ llvm::GlobalAlias *Sel = new llvm::GlobalAlias(
+ llvm::PointerType::getUnqual(SelectorTy),
+ llvm::GlobalValue::InternalLinkage, SelName,
+ NULL, &TheModule);
+ TypedSelectors[Selector] = Sel;
+
+ return Builder.CreateLoad(Sel);
+}
+
+llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
+ const std::string &Name) {
+ llvm::Constant * ConstStr = llvm::ConstantArray::get(Str);
+ ConstStr = new llvm::GlobalVariable(ConstStr->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ ConstStr, Name, &TheModule);
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V, const std::string &Name) {
+ llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+ return new llvm::GlobalVariable(Ty, false,
+ llvm::GlobalValue::InternalLinkage, C, Name, &TheModule);
+}
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V, const std::string &Name) {
+ llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+ return new llvm::GlobalVariable(Ty, false,
+ llvm::GlobalValue::InternalLinkage, C, Name, &TheModule);
+}
+
+/// Generate an NSConstantString object.
+//TODO: In case there are any crazy people still using the GNU runtime without
+//an OpenStep implementation, this should let them select their own class for
+//constant strings.
+llvm::Constant *CGObjCGNU::GenerateConstantString(const ObjCStringLiteral *SL) {
+ std::string Str(SL->getString()->getStrData(),
+ SL->getString()->getByteLength());
+ std::vector<llvm::Constant*> Ivars;
+ Ivars.push_back(NULLPtr);
+ Ivars.push_back(MakeConstantString(Str));
+ Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
+ llvm::Constant *ObjCStr = MakeGlobal(
+ llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ Ivars, ".objc_str");
+ ConstantStrings.push_back(
+ llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty));
+ return ObjCStr;
+}
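+
+// The emitted ".objc_str" global is laid out like a minimal constant string
+// instance (a sketch; the isa slot starts out NULL here and is expected to
+// be fixed up when the module's statics are registered with the runtime):
+//
+//   struct { void *isa; const char *bytes; int length; };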
+
+/// Generates a message send where the super is the receiver. This is a
+/// message send to self with special delivery semantics indicating which
+/// class's method should be called.
+CodeGen::RValue
+CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs) {
+ llvm::Value *cmd = GetSelector(CGF.Builder, Sel);
+
+ CallArgList ActualArgs;
+
+ ActualArgs.push_back(
+ std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
+ CGF.getContext().getObjCIdType()));
+ ActualArgs.push_back(std::make_pair(RValue::get(cmd),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
+ const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, false);
+
+ llvm::Value *ReceiverClass = 0;
+ if (isCategoryImpl) {
+ llvm::Constant *classLookupFunction = 0;
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ if (IsClassMessage) {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, Params, true), "objc_get_meta_class");
+ } else {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, Params, true), "objc_get_class");
+ }
+ ReceiverClass = CGF.Builder.CreateCall(classLookupFunction,
+ MakeConstantString(Class->getNameAsString()));
+ } else {
+    // Set up global aliases for the metaclass or class pointer if they do not
+    // already exist. These are forward references which will be set to
+    // pointers to the class and metaclass structure created for the runtime
+    // load function. To send a message to super, we look up the value of the
+    // super_class pointer from either the class or metaclass structure.
+ if (IsClassMessage) {
+ if (!MetaClassPtrAlias) {
+ MetaClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = MetaClassPtrAlias;
+ } else {
+ if (!ClassPtrAlias) {
+ ClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_class_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = ClassPtrAlias;
+ }
+ }
+ // Cast the pointer to a simplified version of the class structure
+ ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(llvm::StructType::get(IdTy, IdTy, NULL)));
+ // Get the superclass pointer
+ ReceiverClass = CGF.Builder.CreateStructGEP(ReceiverClass, 1);
+ // Load the superclass pointer
+ ReceiverClass = CGF.Builder.CreateLoad(ReceiverClass);
+ // Construct the structure used to look up the IMP
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(Receiver->getType(),
+ IdTy, NULL);
+ llvm::Value *ObjCSuper = CGF.Builder.CreateAlloca(ObjCSuperTy);
+
+ CGF.Builder.CreateStore(Receiver, CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+ CGF.Builder.CreateStore(ReceiverClass,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+
+ // Get the IMP
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
+ Params.push_back(SelectorTy);
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup_super");
+
+ llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+ llvm::Value *imp = CGF.Builder.CreateCall(lookupFunction, lookupArgs,
+ lookupArgs+2);
+
+ return CGF.EmitCall(FnInfo, imp, ActualArgs);
+}
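+
+// Illustrative sketch of the super-send lowering above, as C-ish pseudocode
+// with illustrative names:
+//
+//   struct { id receiver; id class; } sup = { receiver, <super class ptr> };
+//   IMP imp = objc_msg_lookup_super(&sup, sel);
+//   result = imp(receiver, sel, args...);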
+
+/// Generate code for a message send expression.
+CodeGen::RValue
+CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ llvm::Value *cmd;
+ if (Method)
+ cmd = GetSelector(CGF.Builder, Method);
+ else
+ cmd = GetSelector(CGF.Builder, Sel);
+ CallArgList ActualArgs;
+
+ ActualArgs.push_back(
+ std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
+ CGF.getContext().getObjCIdType()));
+ ActualArgs.push_back(std::make_pair(RValue::get(cmd),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
+ const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, false);
+
+ llvm::Value *imp;
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Receiver->getType());
+ Params.push_back(SelectorTy);
+ // For sender-aware dispatch, we pass the sender as the third argument to a
+ // lookup function. When sending messages from C code, the sender is nil.
+ // objc_msg_lookup_sender(id receiver, SEL selector, id sender);
+ if (CGM.getContext().getLangOptions().ObjCSenderDispatch) {
+ llvm::Value *self;
+
+ if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) {
+ self = CGF.LoadObjCSelf();
+ } else {
+ self = llvm::ConstantPointerNull::get(IdTy);
+ }
+ Params.push_back(self->getType());
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup_sender");
+
+ imp = CGF.Builder.CreateCall3(lookupFunction, Receiver, cmd, self);
+ } else {
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup");
+
+ imp = CGF.Builder.CreateCall2(lookupFunction, Receiver, cmd);
+ }
+
+ return CGF.EmitCall(FnInfo, imp, ActualArgs);
+}
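+
+// Illustrative sketch of the two dispatch paths above, as C-ish pseudocode:
+//
+//   // Default GNU dispatch:
+//   IMP imp = objc_msg_lookup(receiver, sel);
+//   result = imp(receiver, sel, args...);
+//
+//   // Sender-aware dispatch (ObjCSenderDispatch):
+//   IMP imp = objc_msg_lookup_sender(receiver, sel, self_or_nil);
+//   result = imp(receiver, sel, args...);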
+
+/// Generates a MethodList. Used in the construction of objc_class and
+/// objc_category structures.
+llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ bool isClassMethodList) {
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(
+      PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+ PtrToInt8Ty, // Method types
+ llvm::PointerType::getUnqual(IMPTy), //Method pointer
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ Elements.clear();
+ if (llvm::Constant *Method =
+ TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
+ MethodSels[i].getAsString(),
+ isClassMethodList))) {
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(MethodSels[i].getAsString());
+ Elements.push_back(llvm::ConstantExpr::getGetElementPtr(C, Zeros, 2));
+ Elements.push_back(
+ llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2));
+ Method = llvm::ConstantExpr::getBitCast(Method,
+ llvm::PointerType::getUnqual(IMPTy));
+ Elements.push_back(Method);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
+ }
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
+ Methods.size());
+ llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+
+ // Structure containing list pointer, array and array count
+ llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
+ llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get();
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::get(NextPtrTy,
+ IntTy,
+ ObjCMethodArrayTy,
+ NULL);
+ // Refine next pointer type to concrete type
+ llvm::cast<llvm::OpaqueType>(
+ OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy);
+ ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get());
+
+ Methods.clear();
+ Methods.push_back(llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(ObjCMethodListTy)));
+ Methods.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ MethodTypes.size()));
+ Methods.push_back(MethodArray);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+}
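+
+// The global created above corresponds roughly to the runtime's method list
+// layout (a sketch; the authoritative definitions live in the GNU runtime
+// headers):
+//
+//   struct objc_method_list {
+//     struct objc_method_list *next;   // always NULL when emitted here
+//     int count;
+//     struct { const char *sel; const char *types; IMP imp; } methods[];
+//   };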
+
+/// Generates an IvarList. Used in the construction of an objc_class.
+llvm::Constant *CGObjCGNU::GenerateIvarList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
+  // Get the ivar structure type.
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ IntTy,
+ NULL);
+ std::vector<llvm::Constant*> Ivars;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarNames[i],
+ Zeros, 2));
+ Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarTypes[i],
+ Zeros, 2));
+ Elements.push_back(IvarOffsets[i]);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
+ }
+
+  // Array of ivar structures
+ llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
+ IvarNames.size());
+
+
+ Elements.clear();
+ Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
+ Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
+ // Structure containing array and array count
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
+ ObjCIvarArrayTy,
+ NULL);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+}
+
+/// Generate a class structure
+llvm::Constant *CGObjCGNU::GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols) {
+ // Set up the class structure
+ // Note: Several of these are char*s when they should be ids. This is
+ // because the runtime performs this translation on load.
+ llvm::StructType *ClassTy = llvm::StructType::get(
+ PtrToInt8Ty, // class_pointer
+ PtrToInt8Ty, // super_class
+ PtrToInt8Ty, // name
+ LongTy, // version
+ LongTy, // info
+ LongTy, // instance_size
+ IVars->getType(), // ivars
+ Methods->getType(), // methods
+      // These are all filled in by the runtime, so we pretend they are
+      // plain pointers.
+ PtrTy, // dtable
+ PtrTy, // subclass_list
+ PtrTy, // sibling_class
+ PtrTy, // protocols
+ PtrTy, // gc_object_type
+ NULL);
+ llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+ llvm::Constant *NullP =
+ llvm::ConstantPointerNull::get(PtrTy);
+ // Fill in the structure
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
+ Elements.push_back(SuperClass);
+ Elements.push_back(MakeConstantString(Name, ".class_name"));
+ Elements.push_back(Zero);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+ Elements.push_back(InstanceSize);
+ Elements.push_back(IVars);
+ Elements.push_back(Methods);
+ Elements.push_back(NullP);
+ Elements.push_back(NullP);
+ Elements.push_back(NullP);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
+ Elements.push_back(NullP);
+ // Create an instance of the structure
+ return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name));
+}
+
+llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
+ PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
+ PtrToInt8Ty,
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back( llvm::ConstantExpr::getGetElementPtr(MethodNames[i],
+ Zeros, 2));
+ Elements.push_back(
+ llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2));
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
+ }
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
+ MethodNames.size());
+ llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy, Methods);
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
+ IntTy, ObjCMethodArrayTy, NULL);
+ Methods.clear();
+ Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
+ Methods.push_back(Array);
+ return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
+}
+// Create the protocol list structure used in classes, categories and so on
+llvm::Constant *CGObjCGNU::GenerateProtocolList(
+ const llvm::SmallVectorImpl<std::string> &Protocols) {
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Protocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
+    PtrTy, // Should be a recursive pointer, but it's always NULL here.
+    LongTy, // FIXME: Should be size_t
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *protocol = ExistingProtocols[*iter];
+ if (!protocol)
+ protocol = GenerateEmptyProtocol(*iter);
+ llvm::Constant *Ptr =
+ llvm::ConstantExpr::getBitCast(protocol, PtrToInt8Ty);
+ Elements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ Elements);
+ Elements.clear();
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
+ Elements.push_back(ProtocolArray);
+ return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
+}
+
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+ const llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
+ const std::string &ProtocolName) {
+ llvm::SmallVector<std::string, 0> EmptyStringVector;
+ llvm::SmallVector<llvm::Constant*, 0> EmptyConstantVector;
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ NULL);
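+ // That is, roughly (a sketch of the runtime-side view):
+ //   struct objc_protocol {
+ //     id isa; // Really a version number; see below.
+ //     char *protocol_name;
+ //     struct objc_protocol_list *protocol_list;
+ //     struct objc_method_description_list *instance_methods;
+ //     struct objc_method_description_list *class_methods;
+ //   };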
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
+}
+
+void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ ASTContext &Context = CGM.getContext();
+ std::string ProtocolName = PD->getNameAsString();
+ llvm::SmallVector<std::string, 16> Protocols;
+ for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
+ E = PD->protocol_end(); PI != E; ++PI)
+ Protocols.push_back((*PI)->getNameAsString());
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(Context),
+ E = PD->instmeth_end(Context); iter != E; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
+ InstanceMethodNames.push_back(
+ CGM.GetAddrOfConstantCString((*iter)->getSelector().getAsString()));
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ // Collect information about class methods:
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCProtocolDecl::classmeth_iterator
+ iter = PD->classmeth_begin(Context),
+ endIter = PD->classmeth_end(Context) ; iter != endIter ; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ ClassMethodNames.push_back(
+ CGM.GetAddrOfConstantCString((*iter)->getSelector().getAsString()));
+ ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ ExistingProtocols[ProtocolName] =
+ llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
+ ".objc_protocol"), IdTy);
+}
+
+void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ std::string ClassName = OCD->getClassInterface()->getNameAsString();
+ std::string CategoryName = OCD->getNameAsString();
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 16> InstanceMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ iter = OCD->instmeth_begin(CGM.getContext()),
+ endIter = OCD->instmeth_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+
+ // Collect information about class methods
+ llvm::SmallVector<Selector, 16> ClassMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ iter = OCD->classmeth_begin(CGM.getContext()),
+ endIter = OCD->classmeth_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+
+ // Collect the names of referenced protocols
+ llvm::SmallVector<std::string, 16> Protocols;
+ const ObjCInterfaceDecl *ClassDecl = OCD->getClassInterface();
+ const ObjCList<ObjCProtocolDecl> &Protos =
+ ClassDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
+ false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
+ PtrTy));
+ // Protocol list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(
+ GenerateProtocolList(Protocols), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, PtrTy,
+ PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
+ ASTContext &Context = CGM.getContext();
+
+ // Get the superclass name.
+ const ObjCInterfaceDecl * SuperClassDecl =
+ OID->getClassInterface()->getSuperClass();
+ std::string SuperClassName;
+ if (SuperClassDecl)
+ SuperClassName = SuperClassDecl->getNameAsString();
+
+ // Get the class name
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+
+ // Get the size of instances.
+ int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
+
+ // Collect information about instance variables.
+ llvm::SmallVector<llvm::Constant*, 16> IvarNames;
+ llvm::SmallVector<llvm::Constant*, 16> IvarTypes;
+ llvm::SmallVector<llvm::Constant*, 16> IvarOffsets;
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
+ // For non-fragile ivars, set the instance size to the negated size of just
+ // this class's own ivars (total size minus the superclass size). The
+ // runtime will then set it to the correct value on load.
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ instanceSize = 0 - (instanceSize - superInstanceSize);
+ }
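+ // For example (a sketch): with a 16-byte superclass and a 40-byte total
+ // instance size, instanceSize becomes -(40 - 16) = -24, and the runtime
+ // rebuilds the real size from the superclass size once the class loads.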
+ for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
+ endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+ // Store the name
+ IvarNames.push_back(CGM.GetAddrOfConstantCString((*iter)
+ ->getNameAsString()));
+ // Get the type encoding for this ivar
+ std::string TypeStr;
+ Context.getObjCEncodingForType((*iter)->getType(), TypeStr);
+ IvarTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ // Get the offset
+ uint64_t Offset;
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter) -
+ superInstanceSize;
+ ObjCIvarOffsetVariable(ClassDecl, *iter);
+ } else {
+ Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter);
+ }
+ IvarOffsets.push_back(
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset));
+ }
+
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 16> InstanceMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCImplementationDecl::instmeth_iterator
+ iter = OID->instmeth_begin(CGM.getContext()),
+ endIter = OID->instmeth_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ for (ObjCImplDecl::propimpl_iterator
+ iter = OID->propimpl_begin(CGM.getContext()),
+ endIter = OID->propimpl_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ InstanceMethodSels.push_back(getter->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ InstanceMethodSels.push_back(setter->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ }
+
+ // Collect information about class methods
+ llvm::SmallVector<Selector, 16> ClassMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCImplementationDecl::classmeth_iterator
+ iter = OID->classmeth_begin(CGM.getContext()),
+ endIter = OID->classmeth_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ // Collect the names of referenced protocols
+ llvm::SmallVector<std::string, 16> Protocols;
+ const ObjCList<ObjCProtocolDecl> &Protos =
+ ClassDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ // Get the superclass pointer.
+ llvm::Constant *SuperClass;
+ if (!SuperClassName.empty()) {
+ SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
+ } else {
+ SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ }
+ // Empty vector used to construct empty method lists
+ llvm::SmallVector<llvm::Constant*, 1> empty;
+ // Generate the method and instance variable lists
+ llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
+ InstanceMethodSels, InstanceMethodTypes, false);
+ llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
+ ClassMethodSels, ClassMethodTypes, true);
+ llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
+ IvarOffsets);
+ // Generate metaclass for class methods
+ llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
+ NULLPtr, 0x2L, /*name*/"", 0, Zeros[0], GenerateIvarList(
+ empty, empty, empty), ClassMethodList, NULLPtr);
+
+ // Generate the class structure
+ llvm::Constant *ClassStruct =
+ GenerateClassStructure(MetaClassStruct, SuperClass, 0x1L,
+ ClassName.c_str(), 0,
+ llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
+ MethodList, GenerateProtocolList(Protocols));
+
+ // Resolve the class aliases, if they exist.
+ if (ClassPtrAlias) {
+ ClassPtrAlias->setAliasee(
+ llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias = 0;
+ }
+ if (MetaClassPtrAlias) {
+ MetaClassPtrAlias->setAliasee(
+ llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias = 0;
+ }
+
+ // Add class structure to list to be added to the symtab later
+ ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
+ Classes.push_back(ClassStruct);
+}
+
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
+ // Only emit an ObjC load function if some Objective-C code has actually
+ // been generated in this module.
+ if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
+ ExistingProtocols.empty() && TypedSelectors.empty() &&
+ UntypedSelectors.empty())
+ return NULL;
+
+ const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ SelectorTy->getElementType());
+ const llvm::Type *SelStructPtrTy = SelectorTy;
+ bool isSelOpaque = false;
+ if (SelStructTy == 0) {
+ SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL);
+ SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ isSelOpaque = true;
+ }
+
+ // Name the ObjC types to make the IR a bit easier to read
+ TheModule.addTypeName(".objc_selector", SelStructPtrTy);
+ TheModule.addTypeName(".objc_id", IdTy);
+ TheModule.addTypeName(".objc_imp", IMPTy);
+
+ std::vector<llvm::Constant*> Elements;
+ llvm::Constant *Statics = NULLPtr;
+ // Generate statics list:
+ if (ConstantStrings.size()) {
+ llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ ConstantStrings.size() + 1);
+ ConstantStrings.push_back(NULLPtr);
+ Elements.push_back(MakeConstantString("NSConstantString",
+ ".objc_static_class_name"));
+ Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
+ ConstantStrings));
+ llvm::StructType *StaticsListTy =
+ llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::Type *StaticsListPtrTy = llvm::PointerType::getUnqual(StaticsListTy);
+ Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+ llvm::ArrayType *StaticsListArrayTy =
+ llvm::ArrayType::get(StaticsListPtrTy, 2);
+ Elements.clear();
+ Elements.push_back(Statics);
+ Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
+ Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+ Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
+ }
+ // Array of classes, categories, and constant objects
+ llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Classes.size() + Categories.size() + 2);
+ llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
+ llvm::Type::Int16Ty,
+ llvm::Type::Int16Ty,
+ ClassListTy, NULL);
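+ // This corresponds to the runtime's symbol table (a sketch, after the
+ // usual libobjc declaration):
+ //   struct objc_symtab {
+ //     unsigned long sel_ref_cnt;
+ //     SEL refs;
+ //     unsigned short cls_def_cnt;
+ //     unsigned short cat_def_cnt;
+ //     void *defs[1]; // classes, then categories, then statics
+ //   };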
+
+ Elements.clear();
+ // Pointer to an array of selectors used in this module.
+ std::vector<llvm::Constant*> Selectors;
+ for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+ iter = TypedSelectors.begin(), iterEnd = TypedSelectors.end();
+ iter != iterEnd ; ++iter) {
+ Elements.push_back(MakeConstantString(iter->first.first, ".objc_sel_name"));
+ Elements.push_back(MakeConstantString(iter->first.second,
+ ".objc_sel_types"));
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ }
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+ iter = UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+ iter != iterEnd; ++iter) {
+ Elements.push_back(
+ MakeConstantString(iter->getKeyData(), ".objc_sel_name"));
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ }
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ // Number of static selectors
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Selectors.size()));
+ llvm::Constant *SelectorList = MakeGlobal(
+ llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
+ ".objc_selector_list");
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ SelStructPtrTy));
+
+ // Now that all of the static selectors exist, create pointers to them.
+ int index = 0;
+ for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+ iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end();
+ iter != iterEnd; ++iter) {
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(SelStructPtrTy,
+ true, llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ ".objc_sel_ptr", &TheModule);
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ if (isSelOpaque) {
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+ llvm::PointerType::getUnqual(SelectorTy));
+ }
+ (*iter).second->setAliasee(SelPtr);
+ }
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+ iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+ iter != iterEnd; iter++) {
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(SelStructPtrTy, true,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ ".objc_sel_ptr", &TheModule);
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ if (isSelOpaque) {
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+ llvm::PointerType::getUnqual(SelectorTy));
+ }
+ (*iter).second->setAliasee(SelPtr);
+ }
+ // Number of classes defined.
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty,
+ Classes.size()));
+ // Number of categories defined
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty,
+ Categories.size()));
+ // Create an array of classes, then categories, then static object instances
+ Classes.insert(Classes.end(), Categories.begin(), Categories.end());
+ // NULL-terminated list of static object instances (mainly constant strings)
+ Classes.push_back(Statics);
+ Classes.push_back(NULLPtr);
+ llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
+ Elements.push_back(ClassList);
+ // Construct the symbol table
+ llvm::Constant *SymTab = MakeGlobal(SymTabTy, Elements);
+
+ // The symbol table is contained in a module which has some version-checking
+ // constants
+ llvm::StructType *ModuleTy = llvm::StructType::get(LongTy, LongTy,
+ PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
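+ // Sketch of the runtime-side declaration this corresponds to (after the
+ // usual libobjc headers; the authoritative definition may differ):
+ //   struct objc_module {
+ //     unsigned long version;
+ //     unsigned long size;      // sizeof(struct objc_module)
+ //     const char *name;        // source file name
+ //     struct objc_symtab *symtab;
+ //   };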
+ Elements.clear();
+ // Runtime version used for compatibility checking.
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ NonFragileRuntimeVersion));
+ } else {
+ Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
+ }
+ // sizeof(ModuleTy)
+ llvm::TargetData td(&TheModule);
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ModuleTy) / 8));
+ // FIXME: Should be the path to the file where this module was declared.
+ Elements.push_back(NULLPtr);
+ Elements.push_back(SymTab);
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+
+ // Create the load function calling the runtime entry point with the module
+ // structure
+ std::vector<const llvm::Type*> VoidArgs;
+ llvm::Function * LoadFunction = llvm::Function::Create(
+ llvm::FunctionType::get(llvm::Type::VoidTy, VoidArgs, false),
+ llvm::GlobalValue::InternalLinkage, ".objc_load_function",
+ &TheModule);
+ llvm::BasicBlock *EntryBB = llvm::BasicBlock::Create("entry", LoadFunction);
+ CGBuilderTy Builder;
+ Builder.SetInsertPoint(EntryBB);
+
+ std::vector<const llvm::Type*> Params(1,
+ llvm::PointerType::getUnqual(ModuleTy));
+ llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::VoidTy, Params, true), "__objc_exec_class");
+ Builder.CreateCall(Register, Module);
+ Builder.CreateRetVoid();
+
+ return LoadFunction;
+}
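+// The emitted load function amounts to the following IR (a sketch; exact
+// types and names vary with the target):
+//   define internal void @.objc_load_function() {
+//   entry:
+//     call void @__objc_exec_class(<module struct>* @module)
+//     ret void
+//   }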
+
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ std::string CategoryName = OCD ? OCD->getNameAsString() : "";
+ std::string ClassName = OMD->getClassInterface()->getNameAsString();
+ std::string MethodName = OMD->getSelector().getAsString();
+ bool isClassMethod = !OMD->isInstanceMethod();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
+ MethodName, isClassMethod);
+
+ llvm::Function *Method = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
+ return Method;
+}
+
+llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+ Params.push_back(LongTy);
+ Params.push_back(BoolTy);
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(IdTy, Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_getProperty"));
+}
+
+llvm::Function *CGObjCGNU::GetPropertySetFunction() {
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+ Params.push_back(LongTy);
+ Params.push_back(IdTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_setProperty"));
+}
+
+llvm::Function *CGObjCGNU::EnumerationMutationFunction() {
+ std::vector<const llvm::Type*> Params(1, IdTy);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::VoidTy, Params, true),
+ "objc_enumerationMutation"));
+}
+
+void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ // Pointer to the personality function
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ std::vector<const llvm::Type*>(), true),
+ "__gnu_objc_personality_v0");
+ Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ llvm::Value *RethrowFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false), "_Unwind_Resume_or_Rethrow");
+
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow");
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
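+ // Rough shape of the control flow built below (a sketch):
+ //   try --normal--> @finally code --> finally.end
+ //   try --unwind--> try.handler --> matching @catch body --> @finally code
+ //   @catch body --unwind--> catch.rethrow --> @finally code --> finally.throw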
+
+ // The GNU runtime does not currently support @synchronized() itself, but we
+ // emit the objc_sync_enter / objc_sync_exit calls it would need anyway.
+ if (!isTry) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncEnter, SyncArg);
+ }
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ // Emit the statements in the @try {} block
+ CGF.setInvokeDest(TryHandler);
+
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+
+ // Jump to @finally if there is no exception
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the handlers
+ CGF.EmitBlock(TryHandler);
+
+ // Get the correct versions of the exception handling intrinsics
+ llvm::TargetData td(&TheModule);
+ int PointerWidth = td.getTypeSizeInBits(PtrTy);
+ assert((PointerWidth == 32 || PointerWidth == 64) &&
+ "Can't yet handle exceptions if pointers are not 32 or 64 bits");
+ llvm::Value *llvm_eh_exception =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector = PointerWidth == 32 ?
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i32) :
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i64);
+ llvm::Value *llvm_eh_typeid_for = PointerWidth == 32 ?
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i32) :
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i64);
+
+ // Exception object
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ llvm::SmallVector<llvm::Value*, 8> ESelArgs;
+ llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers;
+
+ ESelArgs.push_back(Exc);
+ ESelArgs.push_back(Personality);
+
+ bool HasCatchAll = false;
+ // Only @try blocks may have @catch blocks; @synchronized instead gets an
+ // implicit @finally that calls objc_sync_exit (emitted below).
+ if (isTry) {
+ if (const ObjCAtCatchStmt* CatchStmt =
+ cast<ObjCAtTryStmt>(S).getCatchStmts()) {
+ CGF.setInvokeDest(CatchInCatch);
+
+ for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+ const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+ Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
+
+ // @catch() and @catch(id) both catch any ObjC exception
+ if (!CatchDecl || CGF.getContext().isObjCIdType(CatchDecl->getType())
+ || CatchDecl->getType()->isObjCQualifiedIdType()) {
+ // Use i8* null here to signal this is a catch all, not a cleanup.
+ ESelArgs.push_back(NULLPtr);
+ HasCatchAll = true;
+ // No further catches after this one will ever be reached
+ break;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const PointerType *PT = CatchDecl->getType()->getAsPointerType();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT =
+ PT->getPointeeType()->getAsObjCInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ llvm::Value *EHType =
+ MakeConstantString(IT->getDecl()->getNameAsString());
+ ESelArgs.push_back(EHType);
+ }
+ }
+ }
+
+ // We use a cleanup unless there was already a catch all.
+ if (!HasCatchAll) {
+ ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ }
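+ // At this point ESelArgs has one of two shapes (a sketch):
+ //   { exc, personality, "ClassA", "ClassB", null } ; ends in a catch-all
+ //   { exc, personality, "ClassA", i32 0 }          ; ends in a cleanup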
+
+ // Find which handler was matched.
+ llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector,
+ ESelArgs.begin(), ESelArgs.end(), "selector");
+
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+ const ParmVarDecl *CatchParam = Handlers[i].first;
+ const Stmt *CatchBody = Handlers[i].second;
+
+ llvm::BasicBlock *Next = 0;
+
+ // The last handler always matches.
+ if (i + 1 != e) {
+ assert(CatchParam && "Only last handler can be a catch all.");
+
+ // Test whether this block matches the type for the selector and branch
+ // to Match if it does, or to the next BB if it doesn't.
+ llvm::BasicBlock *Match = CGF.createBasicBlock("match");
+ Next = CGF.createBasicBlock("catch.next");
+ llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for,
+ CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy));
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match,
+ Next);
+
+ CGF.EmitBlock(Match);
+ }
+
+ if (CatchBody) {
+ llvm::Value *ExcObject = Exc;
+
+ // Bind the catch parameter if it exists; a parameter-less @catch(...)
+ // sees the raw exception object instead.
+ if (CatchParam) {
+ ExcObject = CGF.Builder.CreateBitCast(Exc,
+ CGF.ConvertType(CatchParam->getType()));
+ // CatchParam is a ParmVarDecl because of the grammar
+ // construction used to handle this, but for codegen purposes
+ // we treat this as a local decl.
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.ObjCEHValueStack.push_back(ExcObject);
+ CGF.EmitStmt(CatchBody);
+ CGF.ObjCEHValueStack.pop_back();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ if (Next)
+ CGF.EmitBlock(Next);
+ } else {
+ assert(!Next && "catch-all should be the last handler.");
+
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+ // The @finally block is a secondary landing pad for any exceptions thrown in
+ // @catch() blocks
+ CGF.EmitBlock(CatchInCatch);
+ Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ ESelArgs.clear();
+ ESelArgs.push_back(Exc);
+ ESelArgs.push_back(Personality);
+ ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
+ "selector");
+ CGF.Builder.CreateCall(llvm_eh_typeid_for,
+ CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy));
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.setInvokeDest(PrevLandingPad);
+
+ CGF.EmitBlock(FinallyBlock);
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit 'objc_sync_exit(expr)' as finally's sole statement for
+ // @synchronized.
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncExit, SyncArg);
+ }
+
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ // Branch around the rethrow code.
+ CGF.EmitBranch(FinallyEnd);
+
+ CGF.EmitBlock(FinallyRethrow);
+ CGF.Builder.CreateCall(RethrowFn, CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(FinallyEnd);
+}
+
+void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::Value *ThrowFn =
+ CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ ExceptionAsObject = Exception;
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp");
+
+ // Note: This may have to be an invoke, if we want to support constructs like:
+ // @try {
+ // @throw(obj);
+ // }
+ // @catch(id) ...
+ //
+ // This is effectively turning @throw into an incredibly-expensive goto, but
+ // it may happen as a result of inlining followed by missed optimizations, or
+ // as a result of stupidity.
+ llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+ if (!UnwindBB) {
+ CGF.Builder.CreateCall(ThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateInvoke(ThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
+ &ExceptionAsObject+1);
+ }
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
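+// The GNU runtime provides no garbage collector, so the GC read and write
+// barrier hooks below are deliberately left as no-ops (a reading of the
+// stubs; GC-style barriers are effectively unsupported for this runtime).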
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj)
+{
+ return 0;
+}
+
+void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ return;
+}
+
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ return;
+}
+
+void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ return;
+}
+
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ return;
+}
+
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString();
+ // Emit the variable and initialize it with what we think the correct value
+ // is. This allows code compiled with non-fragile ivars to work correctly
+ // when linked against code which isn't (most of the time).
+ llvm::GlobalVariable *IvarOffsetGV = CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+ llvm::ConstantInt *OffsetGuess =
+ llvm::ConstantInt::get(LongTy, Offset, "ivar");
+ IvarOffsetGV = new llvm::GlobalVariable(LongTy, false,
+ llvm::GlobalValue::CommonLinkage, OffsetGuess, Name, &TheModule);
+ }
+ return IvarOffsetGV;
+}
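+// For example (a sketch): an ivar "count" in class "Counter" yields a
+// common-linkage global named "__objc_ivar_offset_Counter.count",
+// initialized to the compiler's best guess at the offset.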
+
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID = ObjectTy->getAsObjCInterfaceType()->getDecl();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD) {
+ for (ObjCInterfaceDecl::ivar_iterator IVI = OID->ivar_begin(),
+ IVE = OID->ivar_end(); IVI != IVE; ++IVI)
+ if (OIVD == *IVI)
+ return OID;
+
+ // Also look in synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ Context.CollectSynthesizedIvars(OID, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (OIVD == Ivars[k])
+ return OID;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD);
+
+ return 0;
+}
+
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ if (CGF.getContext().getLangOptions().ObjCNonFragileABI)
+ {
+ Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
+ return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),
+ false, "ivar");
+ }
+ uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(LongTy, Offset, "ivar");
+}
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCGNU(CGM);
+}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
new file mode 100644
index 0000000..8f1404d
--- /dev/null
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -0,0 +1,5780 @@
+//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targetting the Apple runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/LangOptions.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Target/TargetData.h"
+#include <sstream>
+
+using namespace clang;
+using namespace CodeGen;
+
+// Common CGObjCRuntime functions, these don't belong here, but they
+// don't belong in CGObjCRuntime either so we will live with it for
+// now.
+
+/// FindIvarInterface - Find the interface containing the ivar.
+///
+/// FIXME: We shouldn't need to do this, the containing context should
+/// be fixed.
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD,
+ unsigned &Index) {
+ // FIXME: The index here is closely tied to how
+ // ASTContext::getObjCLayout is implemented. This should be fixed to
+ // get the information from the layout directly.
+ Index = 0;
+ for (ObjCInterfaceDecl::ivar_iterator IVI = OID->ivar_begin(),
+ IVE = OID->ivar_end(); IVI != IVE; ++IVI, ++Index)
+ if (OIVD == *IVI)
+ return OID;
+
+ // Also look in synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ Context.CollectSynthesizedIvars(OID, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (OIVD == Ivars[k])
+ return OID;
+ ++Index;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD, Index);
+
+ return 0;
+}
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCImplementationDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ unsigned Index;
+ const ObjCInterfaceDecl *Container =
+ FindIvarInterface(CGM.getContext(), OID, Ivar, Index);
+ assert(Container && "Unable to find ivar container");
+
+ // If we have an implementation (and the ivar is in it), then look the
+ // offset up in the implementation layout.
+ const ASTRecordLayout *RL;
+ if (ID && ID->getClassInterface() == Container)
+ RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+ else
+ RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+ return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID, 0, Ivar) / 8;
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) / 8;
+}
+
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset) {
+ // Compute (type*) ( (char *) BaseValue + Offset)
+ llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ QualType IvarTy = Ivar->getType();
+ const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
+ V = CGF.Builder.CreateGEP(V, Offset, "add.ptr");
+ V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
+
+ if (Ivar->isBitField()) {
+ // We need to compute the bit offset for the bit-field; the offset computed
+ // above is only to the containing byte. Note there is a subtle invariant
+ // here: LookupFieldBitOffset is only correct for non-synthesized ivars,
+ // but we may be called for synthesized ivars. That is safe because a
+ // synthesized ivar can never be a bit-field.
+ uint64_t BitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar) % 8;
+
+ uint64_t BitFieldSize =
+ Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
+ return LValue::MakeBitfield(V, BitOffset, BitFieldSize,
+ IvarTy->isSignedIntegerType(),
+ IvarTy.getCVRQualifiers()|CVRQualifiers);
+ }
+
+ LValue LV = LValue::MakeAddr(V, IvarTy.getCVRQualifiers()|CVRQualifiers,
+ CGF.CGM.getContext().getObjCGCAttrKind(IvarTy));
+ LValue::SetObjCIvar(LV, true);
+ return LV;
+}
+
+namespace {
+
+ typedef std::vector<llvm::Constant*> ConstantVector;
+
+ // FIXME: We should find a nicer way to make the labels for metadata, string
+ // concatenation is lame.
+
+class ObjCCommonTypesHelper {
+private:
+ llvm::Constant *getMessageSendFn() const {
+ // id objc_msgSend (id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend");
+ }
+
+ llvm::Constant *getMessageSendStretFn() const {
+ // void objc_msgSend_stret (id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, true),
+ "objc_msgSend_stret");
+
+ }
+
+ llvm::Constant *getMessageSendFpretFn() const {
+ // FIXME: This should be long double on x86_64?
+ // [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::DoubleTy,
+ Params,
+ true),
+ "objc_msgSend_fpret");
+
+ }
+
+ llvm::Constant *getMessageSendSuperFn() const {
+ // id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+ const char *SuperName = "objc_msgSendSuper";
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ SuperName);
+ }
+
+ llvm::Constant *getMessageSendSuperFn2() const {
+ // id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ const char *SuperName = "objc_msgSendSuper2";
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ SuperName);
+ }
+
+ llvm::Constant *getMessageSendSuperStretFn() const {
+ // void objc_msgSendSuper_stret(void * stretAddr, struct objc_super *super,
+ // SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, true),
+ "objc_msgSendSuper_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperStretFn2() const {
+ // void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
+ // SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, true),
+ "objc_msgSendSuper2_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn();
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn2() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn2();
+ }
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+
+public:
+ const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ const llvm::Type *Int8PtrTy;
+
+ /// ObjectPtrTy - LLVM type for object handles (typeof(id))
+ const llvm::Type *ObjectPtrTy;
+
+ /// PtrObjectPtrTy - LLVM type for id *
+ const llvm::Type *PtrObjectPtrTy;
+
+ /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
+ const llvm::Type *SelectorPtrTy;
+ /// ProtocolPtrTy - LLVM type for external protocol handles
+ /// (typeof(Protocol))
+ const llvm::Type *ExternalProtocolPtrTy;
+
+ // SuperCTy - clang type for struct objc_super.
+ QualType SuperCTy;
+ // SuperPtrCTy - clang type for struct objc_super *.
+ QualType SuperPtrCTy;
+
+ /// SuperTy - LLVM type for struct objc_super.
+ const llvm::StructType *SuperTy;
+ /// SuperPtrTy - LLVM type for struct objc_super *.
+ const llvm::Type *SuperPtrTy;
+
+ /// PropertyTy - LLVM type for struct objc_property (struct _prop_t
+ /// in GCC parlance).
+ const llvm::StructType *PropertyTy;
+
+ /// PropertyListTy - LLVM type for struct objc_property_list
+ /// (_prop_list_t in GCC parlance).
+ const llvm::StructType *PropertyListTy;
+ /// PropertyListPtrTy - LLVM type for struct objc_property_list*.
+ const llvm::Type *PropertyListPtrTy;
+
+ // MethodTy - LLVM type for struct objc_method.
+ const llvm::StructType *MethodTy;
+
+ /// CacheTy - LLVM type for struct objc_cache.
+ const llvm::Type *CacheTy;
+ /// CachePtrTy - LLVM type for struct objc_cache *.
+ const llvm::Type *CachePtrTy;
+
+ llvm::Constant *getGetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ llvm::SmallVector<QualType,16> Params;
+ QualType IdType = Ctx.getObjCIdType();
+ QualType SelType = Ctx.getObjCSelType();
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(IdType, Params), false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
+ }
+
+ llvm::Constant *getSetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ llvm::SmallVector<QualType,16> Params;
+ QualType IdType = Ctx.getObjCIdType();
+ QualType SelType = Ctx.getObjCSelType();
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params), false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
+ }
+
+ llvm::Constant *getEnumerationMutationFn() {
+ // void objc_enumerationMutation (id)
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+ }
+
+ /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
+ llvm::Constant *getGcReadWeakFn() {
+ // id objc_read_weak (id *)
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ }
+
+ /// GcAssignWeakFn -- LLVM objc_assign_weak function.
+ llvm::Constant *getGcAssignWeakFn() {
+ // id objc_assign_weak (id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ }
+
+ /// GcAssignGlobalFn -- LLVM objc_assign_global function.
+ llvm::Constant *getGcAssignGlobalFn() {
+ // id objc_assign_global(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ }
+
+ /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
+ llvm::Constant *getGcAssignIvarFn() {
+ // id objc_assign_ivar(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ }
+
+ /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
+ llvm::Constant *getGcAssignStrongCastFn() {
+ // id objc_assign_strongCast(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ }
+
+ /// ExceptionThrowFn - LLVM objc_exception_throw function.
+ llvm::Constant *getExceptionThrowFn() {
+ // void objc_exception_throw(id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+ }
+
+ /// SyncEnterFn - LLVM objc_sync_enter function.
+ llvm::Constant *getSyncEnterFn() {
+ // void objc_sync_enter (id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ }
+
+ /// SyncExitFn - LLVM objc_sync_exit function.
+ llvm::Constant *getSyncExitFn() {
+ // void objc_sync_exit (id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ }
+
+ llvm::Constant *getSendFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendStretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendStretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendFpretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFpretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
+ }
+
+ ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCCommonTypesHelper(){}
+};
+
+/// ObjCTypesHelper - Helper class that encapsulates lazy
+/// construction of the various types used during ObjC code generation.
+class ObjCTypesHelper : public ObjCCommonTypesHelper {
+public:
+ /// SymtabTy - LLVM type for struct objc_symtab.
+ const llvm::StructType *SymtabTy;
+ /// SymtabPtrTy - LLVM type for struct objc_symtab *.
+ const llvm::Type *SymtabPtrTy;
+ /// ModuleTy - LLVM type for struct objc_module.
+ const llvm::StructType *ModuleTy;
+
+ /// ProtocolTy - LLVM type for struct objc_protocol.
+ const llvm::StructType *ProtocolTy;
+ /// ProtocolPtrTy - LLVM type for struct objc_protocol *.
+ const llvm::Type *ProtocolPtrTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension.
+ const llvm::StructType *ProtocolExtensionTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension *.
+ const llvm::Type *ProtocolExtensionPtrTy;
+ /// MethodDescriptionTy - LLVM type for struct
+ /// objc_method_description.
+ const llvm::StructType *MethodDescriptionTy;
+ /// MethodDescriptionListTy - LLVM type for struct
+ /// objc_method_description_list.
+ const llvm::StructType *MethodDescriptionListTy;
+ /// MethodDescriptionListPtrTy - LLVM type for struct
+ /// objc_method_description_list *.
+ const llvm::Type *MethodDescriptionListPtrTy;
+ /// ProtocolListTy - LLVM type for struct objc_protocol_list.
+ const llvm::Type *ProtocolListTy;
+ /// ProtocolListPtrTy - LLVM type for struct objc_protocol_list *.
+ const llvm::Type *ProtocolListPtrTy;
+ /// CategoryTy - LLVM type for struct objc_category.
+ const llvm::StructType *CategoryTy;
+ /// ClassTy - LLVM type for struct objc_class.
+ const llvm::StructType *ClassTy;
+ /// ClassPtrTy - LLVM type for struct objc_class *.
+ const llvm::Type *ClassPtrTy;
+ /// ClassExtensionTy - LLVM type for struct objc_class_ext.
+ const llvm::StructType *ClassExtensionTy;
+ /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
+ const llvm::Type *ClassExtensionPtrTy;
+ // IvarTy - LLVM type for struct objc_ivar.
+ const llvm::StructType *IvarTy;
+ /// IvarListTy - LLVM type for struct objc_ivar_list.
+ const llvm::Type *IvarListTy;
+ /// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
+ const llvm::Type *IvarListPtrTy;
+ /// MethodListTy - LLVM type for struct objc_method_list.
+ const llvm::Type *MethodListTy;
+ /// MethodListPtrTy - LLVM type for struct objc_method_list *.
+ const llvm::Type *MethodListPtrTy;
+
+ /// ExceptionDataTy - LLVM type for struct _objc_exception_data.
+ const llvm::Type *ExceptionDataTy;
+
+ /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
+ llvm::Constant *getExceptionTryEnterFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false),
+ "objc_exception_try_enter");
+ }
+
+ /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
+ llvm::Constant *getExceptionTryExitFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false),
+ "objc_exception_try_exit");
+ }
+
+ /// ExceptionExtractFn - LLVM objc_exception_extract function.
+ llvm::Constant *getExceptionExtractFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, false),
+ "objc_exception_extract");
+
+ }
+
+ /// ExceptionMatchFn - LLVM objc_exception_match function.
+ llvm::Constant *getExceptionMatchFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ClassPtrTy);
+ Params.push_back(ObjectPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ Params, false),
+ "objc_exception_match");
+
+ }
+
+ /// SetJmpFn - LLVM _setjmp function.
+ llvm::Constant *getSetJmpFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(llvm::Type::Int32Ty));
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ Params, false),
+ "_setjmp");
+
+ }
+
+public:
+ ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCTypesHelper() {}
+};
+
+/// ObjCNonFragileABITypesHelper - Helper class that encapsulates the types
+/// needed by Objective-C's modern (non-fragile) ABI.
+class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
+public:
+
+ // MethodListnfABITy - LLVM for struct _method_list_t
+ const llvm::StructType *MethodListnfABITy;
+
+ // MethodListnfABIPtrTy - LLVM for struct _method_list_t*
+ const llvm::Type *MethodListnfABIPtrTy;
+
+ // ProtocolnfABITy - LLVM for struct _protocol_t
+ const llvm::StructType *ProtocolnfABITy;
+
+ // ProtocolnfABIPtrTy - LLVM for struct _protocol_t*
+ const llvm::Type *ProtocolnfABIPtrTy;
+
+ // ProtocolListnfABITy - LLVM for struct _objc_protocol_list
+ const llvm::StructType *ProtocolListnfABITy;
+
+ // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
+ const llvm::Type *ProtocolListnfABIPtrTy;
+
+ // ClassnfABITy - LLVM for struct _class_t
+ const llvm::StructType *ClassnfABITy;
+
+ // ClassnfABIPtrTy - LLVM for struct _class_t*
+ const llvm::Type *ClassnfABIPtrTy;
+
+ // IvarnfABITy - LLVM for struct _ivar_t
+ const llvm::StructType *IvarnfABITy;
+
+ // IvarListnfABITy - LLVM for struct _ivar_list_t
+ const llvm::StructType *IvarListnfABITy;
+
+ // IvarListnfABIPtrTy - LLVM for struct _ivar_list_t*
+ const llvm::Type *IvarListnfABIPtrTy;
+
+ // ClassRonfABITy - LLVM for struct _class_ro_t
+ const llvm::StructType *ClassRonfABITy;
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ const llvm::Type *ImpnfABITy;
+
+ // CategorynfABITy - LLVM for struct _category_t
+ const llvm::StructType *CategorynfABITy;
+
+ // New types for nonfragile abi messaging.
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+ const llvm::StructType *MessageRefTy;
+ // MessageRefCTy - clang type for struct _message_ref_t
+ QualType MessageRefCTy;
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ const llvm::Type *MessageRefPtrTy;
+ // MessageRefCPtrTy - clang type for struct _message_ref_t*
+ QualType MessageRefCPtrTy;
+
+ // MessengerTy - Type of the messenger (shown as IMP above)
+ const llvm::FunctionType *MessengerTy;
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ const llvm::StructType *SuperMessageRefTy;
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ const llvm::Type *SuperMessageRefPtrTy;
+
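+  // How the fixup messengers below are intended to be used (a sketch
+  // inferred from the types above, not spelled out in this file): each
+  // call site owns a writable _message_ref_t whose messenger slot starts
+  // out pointing at one of the objc_msgSend*_fixup entry points; the
+  // runtime may rewrite that slot to a specialized messenger on first
+  // use, so a send compiles down to roughly:
+  //
+  //   static struct _message_ref_t ref = { objc_msgSend_fixup, sel };
+  //   ((id (*)(id, struct _message_ref_t *, ...))ref.messenger)(obj, &ref);
+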
+ llvm::Constant *getMessageSendFixupFn() {
+ // id objc_msgSend_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_fixup");
+ }
+
+ llvm::Constant *getMessageSendFpretFixupFn() {
+ // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_fpret_fixup");
+ }
+
+ llvm::Constant *getMessageSendStretFixupFn() {
+ // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_stret_fixup");
+ }
+
+ llvm::Constant *getMessageSendIdFixupFn() {
+ // id objc_msgSendId_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendId_fixup");
+ }
+
+ llvm::Constant *getMessageSendIdStretFixupFn() {
+ // id objc_msgSendId_stret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendId_stret_fixup");
+ }
+
+  llvm::Constant *getMessageSendSuper2FixupFn() {
+ // id objc_msgSendSuper2_fixup (struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SuperMessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendSuper2_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2StretFixupFn() {
+ // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SuperMessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendSuper2_stret_fixup");
+ }
+
+ /// EHPersonalityPtr - LLVM value for an i8* to the Objective-C
+ /// exception personality function.
+ llvm::Value *getEHPersonalityPtr() {
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ std::vector<const llvm::Type*>(),
+ true),
+ "__objc_personality_v0");
+ return llvm::ConstantExpr::getBitCast(Personality, Int8PtrTy);
+ }
+
+ llvm::Constant *getUnwindResumeOrRethrowFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false),
+ "_Unwind_Resume_or_Rethrow");
+ }
+
+ llvm::Constant *getObjCEndCatchFn() {
+ std::vector<const llvm::Type*> Params;
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false),
+ "objc_end_catch");
+  }
+
+ llvm::Constant *getObjCBeginCatchFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
+ Params, false),
+ "objc_begin_catch");
+ }
+
+ const llvm::StructType *EHTypeTy;
+ const llvm::Type *EHTypePtrTy;
+
+ ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCNonFragileABITypesHelper(){}
+};
+
+class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
+public:
+ // FIXME - accessibility
+ class GC_IVAR {
+ public:
+ unsigned ivar_bytepos;
+ unsigned ivar_size;
+ GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
+ : ivar_bytepos(bytepos), ivar_size(size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const GC_IVAR &b) const {
+ return ivar_bytepos < b.ivar_bytepos;
+ }
+ };
+
+ class SKIP_SCAN {
+ public:
+ unsigned skip;
+ unsigned scan;
+ SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
+ : skip(_skip), scan(_scan) {}
+ };
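+
+  // How SKIP_SCAN pairs are ultimately encoded (a sketch, assuming the
+  // usual nibble encoding consumed by the collecting runtime): each byte
+  // of the layout string packs a skip count in the high nibble and a
+  // scan count in the low nibble, terminated by a zero byte. "Skip 2
+  // words, then scan 3 words" would thus become:
+  //
+  //   unsigned char layout[] = { (2 << 4) | 3, 0x00 };  // 0x23, 0x00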
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+  // FIXME! May not need this after all.
+ unsigned ObjCABI;
+
+ // gc ivar layout bitmap calculation helper caches.
+ llvm::SmallVector<GC_IVAR, 16> SkipIvars;
+ llvm::SmallVector<GC_IVAR, 16> IvarsInfo;
+
+ /// LazySymbols - Symbols to generate a lazy reference for. See
+ /// DefinedSymbols and FinishModule().
+ std::set<IdentifierInfo*> LazySymbols;
+
+ /// DefinedSymbols - External symbols which are defined by this
+ /// module. The symbols in this list and LazySymbols are used to add
+ /// special linker symbols which ensure that Objective-C modules are
+ /// linked properly.
+ std::set<IdentifierInfo*> DefinedSymbols;
+
+ /// ClassNames - uniqued class names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames;
+
+ /// MethodVarNames - uniqued method variable names.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+
+  /// MethodVarTypes - uniqued method type signatures. We have to use
+  /// a StringMap here because we have no other unique reference.
+ llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
+
+ /// MethodDefinitions - map of methods which have been defined in
+ /// this translation unit.
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;
+
+  /// PropertyNames - uniqued property names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;
+
+ /// ClassReferences - uniqued class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;
+
+ /// SelectorReferences - uniqued selector references.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;
+
+ /// Protocols - Protocols for which an objc_protocol structure has
+ /// been emitted. Forward declarations are handled by creating an
+ /// empty structure whose initializer is filled in when/if defined.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;
+
+ /// DefinedProtocols - Protocols which have actually been
+ /// defined. We should not need this, see FIXME in GenerateProtocol.
+ llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
+
+ /// DefinedClasses - List of defined classes.
+ std::vector<llvm::GlobalValue*> DefinedClasses;
+
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ std::vector<llvm::GlobalValue*> DefinedNonLazyClasses;
+
+ /// DefinedCategories - List of defined categories.
+ std::vector<llvm::GlobalValue*> DefinedCategories;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ std::vector<llvm::GlobalValue*> DefinedNonLazyCategories;
+
+ /// UsedGlobals - List of globals to pack into the llvm.used metadata
+ /// to prevent them from being clobbered.
+ std::vector<llvm::GlobalVariable*> UsedGlobals;
+
+ /// GetNameForMethod - Return a name for the given method.
+ /// \param[out] NameOut - The return value.
+ void GetNameForMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ std::string &NameOut);
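+
+  // The generated names follow the usual Objective-C symbol convention;
+  // e.g. (an illustrative declaration) -[NSString(Extras) reversedString]
+  // becomes something like:
+  //
+  //   "\01-[NSString(Extras) reversedString]"
+  //
+  // where the leading \01 suppresses the usual platform name prefixing.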
+
+ /// GetMethodVarName - Return a unique constant for the given
+ /// selector's name. The return value has type char *.
+ llvm::Constant *GetMethodVarName(Selector Sel);
+ llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
+ llvm::Constant *GetMethodVarName(const std::string &Name);
+
+  /// GetMethodVarType - Return a unique constant for the given
+  /// method's type encoding string. The return value has type char *.
+
+ // FIXME: This is a horrible name.
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D);
+ llvm::Constant *GetMethodVarType(const FieldDecl *D);
+
+ /// GetPropertyName - Return a unique constant for the given
+ /// name. The return value has type char *.
+ llvm::Constant *GetPropertyName(IdentifierInfo *Ident);
+
+ // FIXME: This can be dropped once string functions are unified.
+ llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container);
+
+  /// GetClassName - Return a unique constant for the given class's
+  /// name. The return value has type char *.
+ llvm::Constant *GetClassName(IdentifierInfo *Ident);
+
+ /// BuildIvarLayout - Builds ivar layout bitmap for the class
+ /// implementation for the __strong or __weak case.
+ ///
+ llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
+ bool ForStrongLayout);
+
+ void BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+ void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+
+ /// GetIvarLayoutName - Returns a unique constant for the given
+ /// ivar layout bitmap.
+ llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitPropertyList - Emit the given property list. The return
+ /// value has type PropertyListPtrTy.
+ llvm::Constant *EmitPropertyList(const std::string &Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// GetProtocolRef - Return a reference to the internal protocol
+ /// description, creating an empty one if it has not been
+ /// defined. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// CreateMetadataVar - Create a global variable with internal
+ /// linkage for use by the Objective-C runtime.
+ ///
+ /// This is a convenience wrapper which not only creates the
+ /// variable, but also sets the section and alignment and adds the
+ /// global to the UsedGlobals list.
+ ///
+ /// \param Name - The variable name.
+ /// \param Init - The variable initializer; this is also used to
+ /// define the type of the variable.
+ /// \param Section - The section the variable should go into, or 0.
+ /// \param Align - The alignment for the variable, or 0.
+ /// \param AddToUsed - Whether the variable should be added to
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(const std::string &Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed);
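+
+  // A typical (illustrative) call, mirroring the uses later in this
+  // file: emit an initialized global into an __OBJC section with 4-byte
+  // alignment and keep it alive via llvm.used:
+  //
+  //   llvm::GlobalVariable *GV =
+  //     CreateMetadataVar("\01L_OBJC_CATEGORY_" + ExtName, Init,
+  //                       "__OBJC,__category,regular,no_dead_strip",
+  //                       4, true);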
+
+ /// GetNamedIvarList - Return the list of ivars in the interface
+ /// itself (not including super classes and not including unnamed
+ /// bitfields).
+ ///
+ /// For the non-fragile ABI, this also includes synthesized property
+ /// ivars.
+ void GetNamedIvarList(const ObjCInterfaceDecl *OID,
+ llvm::SmallVector<ObjCIvarDecl*, 16> &Res) const;
+
+ CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+public:
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm) : CGM(cgm)
+ { }
+
+ virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *SL);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD=0);
+
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+};
+
+class CGObjCMac : public CGObjCCommonMac {
+private:
+ ObjCTypesHelper ObjCTypes;
+ /// EmitImageInfo - Emit the image info marker used to encode some module
+ /// level information.
+ void EmitImageInfo();
+
+ /// EmitModuleInfo - Another marker encoding module level
+ /// information.
+ void EmitModuleInfo();
+
+  /// EmitModuleSymbols - Emit module symbols, the list of defined
+ /// classes and categories. The result has type SymtabPtrTy.
+ llvm::Constant *EmitModuleSymbols();
+
+ /// FinishModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishModule();
+
+ /// EmitClassExtension - Generate the class extension structure used
+ /// to store the weak ivar layout and properties. The return value
+ /// has type ClassExtensionPtrTy.
+ llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs);
+
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass);
+
+  /// EmitMetaClassRef - Emit a forward reference to the class structure
+ /// for the metaclass of the given interface. The return value has
+ /// type ClassPtrTy.
+ llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClass - Emit a class structure for the metaclass of the
+ /// given implementation. The return value has type ClassPtrTy.
+ llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ const ConstantVector &Methods);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListPtrTy.
+ llvm::Constant *EmitMethodList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods);
+
+ /// EmitMethodDescList - Emit a method description list for a list of
+ /// method declarations.
+  /// - Name: The name of the variable to emit.
+  /// - Section: The section the variable should go into.
+  /// - Methods: The method description constants to output.
+ ///
+ /// The return value has type MethodDescriptionListPtrTy.
+ llvm::Constant *EmitMethodDescList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolExtension - Generate the protocol extension
+ /// structure used to store optional instance and class methods, and
+ /// protocol properties. The return value has type
+ /// ProtocolExtensionPtrTy.
+ llvm::Constant *
+ EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ const ConstantVector &OptInstanceMethods,
+ const ConstantVector &OptClassMethods);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(const std::string &Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+
+ public:
+ CGObjCMac(CodeGen::CodeGenModule &cgm);
+
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method);
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+class CGObjCNonFragileABIMac : public CGObjCCommonMac {
+private:
+ ObjCNonFragileABITypesHelper ObjCTypes;
+ llvm::GlobalVariable* ObjCEmptyCacheVar;
+ llvm::GlobalVariable* ObjCEmptyVtableVar;
+
+ /// SuperClassReferences - uniqued super class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
+
+ /// MetaClassReferences - uniqued meta class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;
+
+ /// EHTypeReferences - uniqued class ehtype references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
+
+ /// NonLegacyDispatchMethods - List of methods for which we do *not* generate
+ /// legacy messaging dispatch.
+ llvm::DenseSet<Selector> NonLegacyDispatchMethods;
+
+ /// LegacyDispatchedSelector - Returns true if SEL is not in the list of
+ /// NonLegacyDispatchMethods; false otherwise.
+ bool LegacyDispatchedSelector(Selector Sel);
+
+ /// FinishNonFragileABIModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishNonFragileABIModule();
+
+ /// AddModuleClassList - Add the given list of class pointers to the
+ /// module with the provided symbol and section names.
+ void AddModuleClassList(const std::vector<llvm::GlobalValue*> &Container,
+ const char *SymbolName,
+ const char *SectionName);
+
+ llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID);
+ llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListnfABITy.
+ llvm::Constant *EmitMethodList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods);
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListnfABIPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);
+
+ llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int offset);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(const std::string &Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class reference.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given super class reference.
+ llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+  /// EmitMetaClassRef - Return a Value* for the address of the class's
+  /// _class_t metadata.
+ llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+ /// the given ivar.
+ ///
+ llvm::GlobalVariable * ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+
+ /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
+ /// interface. The return value has type EHTypePtrTy.
+ llvm::Value *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition);
+
+ const char *getMetaclassSymbolPrefix() const {
+ return "OBJC_METACLASS_$_";
+ }
+
+ const char *getClassSymbolPrefix() const {
+ return "OBJC_CLASS_$_";
+ }
+
+ void GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize);
+
+ // Shamelessly stolen from Analysis/CFRefCount.cpp
+ Selector GetNullarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(0, &II);
+ }
+
+ Selector GetUnarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(1, &II);
+ }
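+
+  // Illustrative (hypothetical) uses of the two helpers above:
+  //
+  //   Selector S0 = GetNullarySelector("retain");    // 'retain'
+  //   Selector S1 = GetUnarySelector("addObject");   // 'addObject:'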
+
+ /// ImplementationIsNonLazy - Check whether the given category or
+ /// class implementation is "non-lazy".
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;
+
+public:
+ CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
+ // FIXME. All stubs for now!
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel)
+ { return EmitSelector(Builder, Sel); }
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method)
+ { return EmitSelector(Builder, Method->getSelector()); }
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+ }
+ virtual llvm::Constant *GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+ }
+ virtual llvm::Constant *EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+ }
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+} // end anonymous namespace
+
+/* *** Helper Functions *** */
+
+/// getConstantGEP() - Helper routine to construct simple GEPs.
+static llvm::Constant *getConstantGEP(llvm::Constant *C,
+ unsigned idx0,
+ unsigned idx1) {
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, idx0),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, idx1)
+ };
+ return llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2);
+}
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+static bool hasObjCExceptionAttribute(const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Super);
+ return false;
+}
+
+/* *** CGObjCMac Public Interface *** */
+
+CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm)
+{
+ ObjCABI = 1;
+ EmitImageInfo();
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(Builder, ID);
+}
+
+/// GetSelector - Return the pointer to the unique'd string for this selector.
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+ return EmitSelector(Builder, Sel);
+}
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+ return EmitSelector(Builder, Method->getSelector());
+}
+
+/// Generate a constant CFString object.
+/*
+ struct __builtin_CFString {
+ const int *isa; // point to __CFConstantStringClassReference
+ int flags;
+ const char *str;
+ long length;
+ };
+*/
+
+llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+ const ObjCStringLiteral *SL) {
+ return CGM.GetAddrOfConstantCFString(SL->getString());
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs) {
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+      // A message sent to 'super' in a class method defined in a category
+      // implementation requires special treatment: we must retrieve the
+      // _metaclass_ for the current class, pointed at by the class's
+      // "isa" pointer. The following assumes that "isa" is the first
+      // ivar in a class (which it must be).
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+    } else {
+ llvm::Value *MetaClassPtr = EmitMetaClassRef(Class);
+ llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1);
+ llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+ Target = Super;
+ }
+ } else {
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ }
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ const llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+ return EmitLegacyMessageSend(CGF, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, ObjCTypes);
+}
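+
+/*
+  Concretely (an illustrative case): for [super init] in an instance
+  method of class Foo, the structure built above holds the receiver and
+  Foo's superclass, and the send reduces to roughly:
+
+    struct objc_super s = { (id)self, FooSuperclassRef };
+    objc_msgSendSuper(&s, @selector(init));
+
+  In a class method the second slot instead holds the superclass's
+  *metaclass*, per the IsClassMessage logic above.
+*/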
+
+/// Generate code for a message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ return EmitLegacyMessageSend(CGF, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, ObjCTypes);
+}
+
+CodeGen::RValue CGObjCCommonMac::EmitLegacyMessageSend(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ CallArgList ActualArgs;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+ ActualArgs.push_back(std::make_pair(RValue::get(Sel),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
+  // In the 64-bit ABI the function type must be assumed variadic; in the
+  // 32-bit ABI it seems not to matter.
+ const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo, (ObjCABI == 2));
+
+ llvm::Constant *Fn = NULL;
+ if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+ : ObjCTypes.getSendStretFn(IsSuper);
+ } else if (ResultType->isFloatingType()) {
+ if (ObjCABI == 2) {
+ if (const BuiltinType *BT = ResultType->getAsBuiltinType()) {
+ BuiltinType::Kind k = BT->getKind();
+ Fn = (k == BuiltinType::LongDouble) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFn2(IsSuper);
+ }
+ }
+ else
+ // FIXME. This currently matches gcc's API for x86-32. May need to change
+ // for others if we have their API.
+ Fn = ObjCTypes.getSendFpretFn(IsSuper);
+ } else {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+ : ObjCTypes.getSendFn(IsSuper);
+ }
+ assert(Fn && "EmitLegacyMessageSend - unknown API");
+ Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
+ return CGF.EmitCall(FnInfo, Fn, ActualArgs);
+}
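+
+/*
+  For reference, the messenger selection above reduces to:
+
+    sret result           -> getSendStretFn / getSendStretFn2
+    floating-point result -> getSendFpretFn (under ABI 2, getSendFpretFn2
+                             is used only for long double)
+    everything else       -> getSendFn / getSendFn2
+
+  with the *2 variants chosen when ObjCABI == 2, and IsSuper selecting the
+  objc_msgSendSuper family inside each getter.
+*/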
+
+llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
+ ObjCTypes.ExternalProtocolPtrTy);
+}
+
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ // FIXME: We shouldn't need this, the protocol decl should contain enough
+ // information to tell us whether this was a declaration or a definition.
+ DefinedProtocols.insert(PD->getIdentifier());
+
+ // If we have generated a forward reference to this protocol, emit
+ // it now. Otherwise do nothing, the protocol objects are lazily
+ // emitted.
+ if (Protocols.count(PD->getIdentifier()))
+ GetOrEmitProtocol(PD);
+}
+
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+ if (DefinedProtocols.count(PD->getIdentifier()))
+ return GetOrEmitProtocol(PD);
+ return GetOrEmitProtocolRef(PD);
+}
+
+/*
+ // APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+ struct _objc_protocol {
+ struct _objc_protocol_extension *isa;
+ char *protocol_name;
+ struct _objc_protocol_list *protocol_list;
+ struct _objc__method_prototype_list *instance_methods;
+ struct _objc__method_prototype_list *class_methods
+ };
+
+ See EmitProtocolExtension().
+*/
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ const char *ProtocolName = PD->getNameAsCString();
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(CGM.getContext()),
+ e = PD->instmeth_end(CGM.getContext()); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ } else {
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(CGM.getContext()),
+ e = PD->classmeth_end(CGM.getContext()); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ } else {
+ ClassMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] =
+ EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getNameAsString(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+ Values[3] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getNameAsString(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[4] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_"
+ + PD->getNameAsString(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ std::string("\01L_OBJC_PROTOCOL_")+ProtocolName,
+ &CGM.getModule());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    // FIXME: Is this alignment necessary? Why only for protocols?
+    Entry->setAlignment(4);
+    UsedGlobals.push_back(Entry);
+ }
+
+ return Entry;
+}
+
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01L_OBJC_PROTOCOL_" + PD->getNameAsString(),
+ &CGM.getModule());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    // FIXME: Is this alignment necessary? Why only for protocols?
+    Entry->setAlignment(4);
+    UsedGlobals.push_back(Entry);
+ }
+
+ return Entry;
+}
+
+/*
+ struct _objc_protocol_extension {
+ uint32_t size;
+ struct objc_method_description_list *optional_instance_methods;
+ struct objc_method_description_list *optional_class_methods;
+ struct objc_property_list *instance_properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ const ConstantVector &OptInstanceMethods,
+ const ConstantVector &OptClassMethods) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ std::vector<llvm::Constant*> Values(4);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getNameAsString(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods);
+ Values[2] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getNameAsString(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods);
+ Values[3] = EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" +
+ PD->getNameAsString(),
+ 0, PD, ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
+ Values[3]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+ // No special section, but goes in llvm.used
+ return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getNameAsString(),
+ Init,
+ 0, 0, true);
+}
+
+/*
+ struct objc_protocol_list {
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolList(const std::string &Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<llvm::Constant*> ProtocolRefs;
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin));
+
+ // Just return null for empty protocol lists
+ if (ProtocolRefs.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+ std::vector<llvm::Constant*> Values(3);
+ // This field is only used by the runtime.
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[2] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ 4, false);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
+
+/*
+ struct _objc_property {
+ const char * const name;
+ const char * const attributes;
+ };
+
+ struct _objc_property_list {
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
+ };
+*/
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(const std::string &Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ std::vector<llvm::Constant*> Properties, Prop(2);
+ for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(CGM.getContext()),
+ E = OCD->prop_end(CGM.getContext()); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ Prop[0] = GetPropertyName(PD->getIdentifier());
+ Prop[1] = GetPropertyTypeString(PD, Container);
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+ Prop));
+ }
+
+ // Return null for empty list.
+ if (Properties.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
+ unsigned PropertySize =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+ Properties.size());
+ Values[2] = llvm::ConstantArray::get(AT, Properties);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" :
+ "__OBJC,__property,regular,no_dead_strip",
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
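+
+/*
+  As a concrete (illustrative) example of the name/attribute pairs emitted
+  above, a declaration like
+
+    @property (nonatomic, copy) NSString *name;  // synthesized to _name
+
+  produces roughly { "name", "T@\"NSString\",C,N,V_name" }, following the
+  usual property type-encoding string format.
+*/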
+
+/*
+ struct objc_method_description_list {
+ int count;
+ struct objc_method_description list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ std::vector<llvm::Constant*> Desc(2);
+ Desc[0] = llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
+ Desc);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodDescList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
+ Methods.size());
+ Values[1] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+}
+
+/*
+ struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ uint32_t size; // <rdar://4585769>
+ struct _objc_property_list *instance_properties;
+ };
+ */
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+  // FIXME: This is poor design; the OCD should have a pointer to the category
+ // decl. Additionally, note that Category can be null for the @implementation
+ // w/o an @interface case. Sema should just create one for us as it does for
+ // @implementation so everyone else can live life under a clear blue sky.
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ std::string ExtName(Interface->getNameAsString() + "_" +
+ OCD->getNameAsString());
+
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(CGM.getContext()),
+ e = OCD->instmeth_end(CGM.getContext()); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(CGM.getContext()),
+ e = OCD->classmeth_end(CGM.getContext()); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ std::vector<llvm::Constant*> Values(7);
+ Values[0] = GetClassName(OCD->getIdentifier());
+ Values[1] = GetClassName(Interface->getIdentifier());
+ LazySymbols.insert(Interface->getIdentifier());
+ Values[2] =
+ EmitMethodList(std::string("\01L_OBJC_CATEGORY_INSTANCE_METHODS_") +
+ ExtName,
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] =
+ EmitMethodList(std::string("\01L_OBJC_CATEGORY_CLASS_METHODS_") + ExtName,
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ if (Category) {
+ Values[4] =
+ EmitProtocolList(std::string("\01L_OBJC_CATEGORY_PROTOCOLS_") + ExtName,
+ Category->protocol_begin(),
+ Category->protocol_end());
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ }
+ Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+ // If there is no category @interface then there can be no properties.
+ if (Category) {
+ Values[6] = EmitPropertyList(std::string("\01l_OBJC_$_PROP_LIST_") + ExtName,
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(std::string("\01L_OBJC_CATEGORY_")+ExtName, Init,
+ "__OBJC,__category,regular,no_dead_strip",
+ 4, true);
+ DefinedCategories.push_back(GV);
+}
+
+// FIXME: Get from somewhere?
+enum ClassFlags {
+ eClassFlags_Factory = 0x00001,
+ eClassFlags_Meta = 0x00002,
+  // <rdar://5142207>
+ eClassFlags_HasCXXStructors = 0x02000,
+ eClassFlags_Hidden = 0x20000,
+ eClassFlags_ABI2_Hidden = 0x00010,
+  eClassFlags_ABI2_HasCXXStructors = 0x00004 // <rdar://4923634>
+};
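+
+// For example, a hidden factory class with no C++ structors is emitted
+// with eClassFlags_Factory | eClassFlags_Hidden, i.e.
+// 0x00001 | 0x20000 = 0x20001.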
+
+/*
+ struct _objc_class {
+ Class isa;
+ Class super_class;
+ const char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct _objc_cache *cache;
+ struct _objc_protocol_list *protocols;
+  // Objective-C 1.0 extensions (<rdar://4585769>)
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+
+ See EmitClassExtension();
+ */
+void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ DefinedSymbols.insert(ID->getIdentifier());
+
+ std::string ClassName = ID->getNameAsString();
+ // FIXME: Gross
+ ObjCInterfaceDecl *Interface =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ llvm::Constant *Protocols =
+ EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getNameAsString(),
+ Interface->protocol_begin(),
+ Interface->protocol_end());
+ unsigned Flags = eClassFlags_Factory;
+ unsigned Size =
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;
+
+ // FIXME: Set CXX-structors flag.
+ if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ Flags |= eClassFlags_Hidden;
+
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(CGM.getContext()),
+ e = ID->instmeth_end(CGM.getContext()); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(CGM.getContext()),
+ e = ID->classmeth_end(CGM.getContext()); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(CGM.getContext()),
+ e = ID->propimpl_end(CGM.getContext()); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(12);
+ Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
+ // Record a reference to the super class.
+ LazySymbols.insert(Super->getIdentifier());
+
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, false);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ Values[10] = BuildIvarLayout(ID, true);
+ Values[11] = EmitClassExtension(ID);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(std::string("\01L_OBJC_CLASS_")+ClassName, Init,
+ "__OBJC,__class,regular,no_dead_strip",
+ 4, true);
+ DefinedClasses.push_back(GV);
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ const ConstantVector &Methods) {
+ unsigned Flags = eClassFlags_Meta;
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+
+ if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ Flags |= eClassFlags_Hidden;
+
+ std::vector<llvm::Constant*> Values(12);
+ // The isa for the metaclass is the root of the hierarchy.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ Values[ 0] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Root->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ // The super class for the metaclass is emitted as the name of the
+ // super class. The runtime fixes this up to point to the
+ // *metaclass* for the super class.
+ if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, true);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip",
+ Methods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ // ivar_layout for metaclass is always NULL.
+ Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ // The class extension is always unused for metaclasses.
+ Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ std::string Name("\01L_OBJC_METACLASS_");
+ Name += ID->getNameAsCString();
+
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ } else {
+ GV = new llvm::GlobalVariable(ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name,
+ &CGM.getModule());
+ }
+ GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
+ GV->setAlignment(4);
+ UsedGlobals.push_back(GV);
+
+ return GV;
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+
+  // FIXME: Should we look these up somewhere other than the module? It's a
+  // bit silly since we only generate these while processing an
+  // implementation, so exactly one pointer would work if we knew when we
+  // entered/exited an implementation block.
+
+ // Check for an existing forward reference.
+ // Previously, metaclass with internal linkage may have been defined.
+ // pass 'true' as 2nd argument so it is returned.
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ return GV;
+ } else {
+ // Generate as an external reference to keep a consistent
+ // module. This will be patched up when we emit the metaclass.
+ return new llvm::GlobalVariable(ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name,
+ &CGM.getModule());
+ }
+}
+
+/*
+ struct objc_class_ext {
+ uint32_t size;
+ const char *weak_ivar_layout;
+ struct _objc_property_list *properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = BuildIvarLayout(ID, false);
+ Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getNameAsString(),
+ ID, ID->getClassInterface(), ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
+ return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getNameAsString(),
+ Init, "__OBJC,__class_ext,regular,no_dead_strip",
+ 4, true);
+}
+
+/*
+ struct objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+
+ struct objc_ivar_list {
+ int ivar_count;
+ struct objc_ivar list[count];
+ };
+ */
+llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass) {
+ std::vector<llvm::Constant*> Ivars, Ivar(3);
+
+  // When emitting the root class GCC emits ivar entries for the
+  // actual class structure. It is not clear if we need to follow this
+  // behavior; for now let's try to get away with not doing it. If we do
+  // need to, the cleanest solution would be to make up an
+  // ObjCInterfaceDecl for the class.
+ if (ForClass)
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ ObjCInterfaceDecl *OID =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ GetNamedIvarList(OID, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ Ivar[0] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[1] = GetMethodVarType(IVD);
+ Ivar[2] = llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD));
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+ }
+
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
+ Ivars.size());
+ Values[1] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV;
+ if (ForClass)
+ GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getNameAsString(),
+ Init, "__OBJC,__class_vars,regular,no_dead_strip",
+ 4, true);
+ else
+ GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_"
+ + ID->getNameAsString(),
+ Init, "__OBJC,__instance_vars,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+}
+
+/*
+ struct objc_method {
+ SEL method_name;
+ char *method_types;
+ void *method;
+ };
+
+ struct objc_method_list {
+ struct objc_method_list *obsolete;
+ int count;
+ struct objc_method methods_list[count];
+ };
+*/
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
+ // FIXME: Use DenseMap::lookup
+ llvm::Function *Fn = MethodDefinitions[MD];
+ if (!Fn)
+ return 0;
+
+ std::vector<llvm::Constant*> Method(3);
+ Method[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Method[1] = GetMethodVarType(MD);
+ Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodListPtrTy);
+}
+
+llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ std::string Name;
+ GetNameForMethod(OMD, CD, Name);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ llvm::Function *Method =
+ llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+ MethodDefinitions.insert(std::make_pair(OMD, Method));
+
+ return Method;
+}
+
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateMetadataVar(const std::string &Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed) {
+ const llvm::Type *Ty = Init->getType();
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Ty, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name,
+ &CGM.getModule());
+ if (Section)
+ GV->setSection(Section);
+ if (Align)
+ GV->setAlignment(Align);
+ if (AddToUsed)
+ UsedGlobals.push_back(GV);
+ return GV;
+}
+
+llvm::Function *CGObjCMac::ModuleInitFunction() {
+ // Abuse this interface function as a place to finalize.
+ FinishModule();
+
+ return NULL;
+}
+
+llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+}
+
+/*
+
+Objective-C setjmp-longjmp (sjlj) Exception Handling
+--
+
+The basic framework for a @try-catch-finally is as follows:
+{
+ objc_exception_data d;
+ id _rethrow = null;
+ bool _call_try_exit = true;
+
+ objc_exception_try_enter(&d);
+ if (!setjmp(d.jmp_buf)) {
+ ... try body ...
+ } else {
+ // exception path
+ id _caught = objc_exception_extract(&d);
+
+ // enter new try scope for handlers
+ if (!setjmp(d.jmp_buf)) {
+ ... match exception and execute catch blocks ...
+
+ // fell off end, rethrow.
+ _rethrow = _caught;
+ ... jump-through-finally to finally_rethrow ...
+ } else {
+ // exception in catch block
+ _rethrow = objc_exception_extract(&d);
+ _call_try_exit = false;
+ ... jump-through-finally to finally_rethrow ...
+ }
+ }
+ ... jump-through-finally to finally_end ...
+
+finally:
+ if (_call_try_exit)
+ objc_exception_try_exit(&d);
+
+ ... finally block ....
+ ... dispatch to finally destination ...
+
+finally_rethrow:
+ objc_exception_throw(_rethrow);
+
+finally_end:
+}
+
+This framework differs slightly from the one gcc uses, in that gcc
+uses _rethrow to determine if objc_exception_try_exit should be called
+and if the object should be rethrown. This breaks in the face of
+throwing nil and introduces unnecessary branches.
+
+We specialize this framework for a few particular circumstances:
+
+ - If there are no catch blocks, then we avoid emitting the second
+ exception handling context.
+
+ - If there is a catch-all catch block (i.e. @catch(...) or @catch(id
+ e)) we avoid emitting the code to rethrow an uncaught exception.
+
+ - FIXME: If there is no @finally block we can do a few more
+ simplifications.
+
+Rethrows and Jumps-Through-Finally
+--
+
+Support for implicit rethrows and jumping through the finally block is
+handled by storing the current exception-handling context in
+ObjCEHStack.
+
+In order to implement proper @finally semantics, we support one basic
+mechanism for jumping through the finally block to an arbitrary
+destination. Constructs which generate exits from a @try or @catch
+block use this mechanism to implement the proper semantics by chaining
+jumps, as necessary.
+
+This mechanism works like the one used for indirect goto: we
+arbitrarily assign an ID to each destination and store the ID for the
+destination in a variable prior to entering the finally block. At the
+end of the finally block we simply create a switch to the proper
+destination.
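+
+For illustration only (the IDs and the _dest variable are arbitrary names
+for this sketch), if finally_end is assigned ID 0 and finally_rethrow is
+assigned ID 1, the dispatch at the end of the finally block is simply:
+
+  switch (_dest) {
+  case 0: goto finally_end;
+  case 1: goto finally_rethrow;
+  }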
+
+Code gen for "@synchronized(expr) stmt;" effectively generates:
+
+  objc_sync_enter(expr);
+  @try stmt @finally { objc_sync_exit(expr); }
+*/
+
+void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ // Create various blocks we refer to for handling @finally.
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyExit = CGF.createBasicBlock("finally.exit");
+ llvm::BasicBlock *FinallyNoExit = CGF.createBasicBlock("finally.noexit");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can safely avoid a temp here because jumps into
+ // @synchronized are illegal & this will dominate uses.
+ llvm::Value *SyncArg = 0;
+ if (!isTry) {
+ SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ }
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ CGF.ObjCEHValueStack.push_back(0);
+
+ // Allocate memory for the exception data and rethrow pointer.
+ llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ "exceptiondata.ptr");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy,
+ "_rethrow");
+ llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca(llvm::Type::Int1Ty,
+ "_call_try_exit");
+ CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(), CallTryExitPtr);
+
+ // Enter a new try block and call setjmp.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+ llvm::Value *JmpBufPtr = CGF.Builder.CreateStructGEP(ExceptionData, 0,
+ "jmpbufarray");
+ JmpBufPtr = CGF.Builder.CreateStructGEP(JmpBufPtr, 0, "tmp");
+ llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
+ JmpBufPtr, "result");
+
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"),
+ TryHandler, TryBlock);
+
+ // Emit the @try block.
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the "exception in @try" block.
+ CGF.EmitBlock(TryHandler);
+
+ // Retrieve the exception object. We may emit multiple blocks but
+ // nothing can cross this so the value is already in SSA form.
+ llvm::Value *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ CGF.ObjCEHValueStack.back() = Caught;
+  if (!isTry) {
+    CGF.Builder.CreateStore(Caught, RethrowPtr);
+    CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(), CallTryExitPtr);
+    CGF.EmitBranchThroughCleanup(FinallyRethrow);
+  } else if (const ObjCAtCatchStmt *CatchStmt =
+               cast<ObjCAtTryStmt>(S).getCatchStmts()) {
+ // Enter a new exception try block (in case a @catch block throws
+ // an exception).
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+
+ llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
+ JmpBufPtr, "result");
+ llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "threw");
+
+ llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch");
+ llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch.handler");
+ CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+
+ CGF.EmitBlock(CatchBlock);
+
+ // Handle catch list. As a special case we check if everything is
+ // matched and avoid generating code for falling off the end if
+ // so.
+ bool AllMatched = false;
+ for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch");
+
+ const ParmVarDecl *CatchParam = CatchStmt->getCatchParamDecl();
+ const PointerType *PT = 0;
+
+ // catch(...) always matches.
+ if (!CatchParam) {
+ AllMatched = true;
+ } else {
+ PT = CatchParam->getType()->getAsPointerType();
+
+ // catch(id e) always matches.
+ // FIXME: For the time being we also match id<X>; this should
+ // be rejected by Sema instead.
+ if ((PT && CGF.getContext().isObjCIdStructType(PT->getPointeeType())) ||
+ CatchParam->getType()->isObjCQualifiedIdType())
+ AllMatched = true;
+ }
+
+ if (AllMatched) {
+ if (CatchParam) {
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+ CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+ break;
+ }
+
+ assert(PT && "Unexpected non-pointer type in @catch");
+ QualType T = PT->getPointeeType();
+ const ObjCInterfaceType *ObjCType = T->getAsObjCInterfaceType();
+ assert(ObjCType && "Catch parameter must have Objective-C type!");
+
+ // Check if the @catch block matches the exception object.
+ llvm::Value *Class = EmitClassRef(CGF.Builder, ObjCType->getDecl());
+
+ llvm::Value *Match =
+ CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
+ Class, Caught, "match");
+
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("matched");
+
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
+ MatchedBlock, NextCatchBlock);
+
+ // Emit the @catch block.
+ CGF.EmitBlock(MatchedBlock);
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ llvm::Value *Tmp =
+ CGF.Builder.CreateBitCast(Caught, CGF.ConvertType(CatchParam->getType()),
+ "tmp");
+ CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(NextCatchBlock);
+ }
+
+ if (!AllMatched) {
+ // None of the handlers caught the exception, so store it to be
+ // rethrown at the end of the @finally block.
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+
+ // Emit the exception handler for the @catch blocks.
+ CGF.EmitBlock(CatchHandler);
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData),
+ RethrowPtr);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(), CallTryExitPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ } else {
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(), CallTryExitPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+
+ // Pop the exception-handling stack entry. It is important to do
+ // this now, because the code in the @finally block is not in this
+ // context.
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.ObjCEHValueStack.pop_back();
+
+ // Emit the @finally block.
+ CGF.EmitBlock(FinallyBlock);
+ llvm::Value* CallTryExit = CGF.Builder.CreateLoad(CallTryExitPtr, "tmp");
+
+ CGF.Builder.CreateCondBr(CallTryExit, FinallyExit, FinallyNoExit);
+
+ CGF.EmitBlock(FinallyExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData);
+
+ CGF.EmitBlock(FinallyNoExit);
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ }
+
+ // Emit the switch block
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ CGF.EmitBlock(FinallyRethrow);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(FinallyEnd);
+}
+
+void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj)
+{
+ const llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ return;
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst)
+///
+void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, "assignivar");
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCValueForIvar - Code Gen for ivar reference.
+///
+LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID = ObjectTy->getAsObjCInterfaceType()->getDecl();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy),
+ Offset);
+}
+
+/* *** Private Interface *** */
+
+/// EmitImageInfo - Emit the image info marker used to encode some module
+/// level information.
+///
+/// See: <rdr://4810609&4810587&4810587>
+/// struct IMAGE_INFO {
+/// unsigned version;
+/// unsigned flags;
+/// };
+enum ImageInfoFlags {
+ eImageInfo_FixAndContinue = (1 << 0), // FIXME: Not sure what
+ // this implies.
+ eImageInfo_GarbageCollected = (1 << 1),
+ eImageInfo_GCOnly = (1 << 2),
+ eImageInfo_OptimizedByDyld = (1 << 3), // FIXME: When is this set.
+
+ // A flag indicating that the module has no instances of an
+ // @synthesize of a superclass variable. <rdar://problem/6803242>
+ eImageInfo_CorrectedSynthesize = (1 << 4)
+};
+
+void CGObjCMac::EmitImageInfo() {
+ unsigned version = 0; // Version is unused?
+ unsigned flags = 0;
+
+ // FIXME: Fix and continue?
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+ flags |= eImageInfo_GarbageCollected;
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ flags |= eImageInfo_GCOnly;
+
+ // We never allow @synthesize of a superclass property.
+ flags |= eImageInfo_CorrectedSynthesize;
+
+ // Emitted as int[2];
+ llvm::Constant *values[2] = {
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, version),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, flags)
+ };
+ llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::Int32Ty, 2);
+
+ const char *Section;
+ if (ObjCABI == 1)
+ Section = "__OBJC, __image_info,regular";
+ else
+ Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
+ llvm::ConstantArray::get(AT, values, 2),
+ Section,
+ 0,
+ true);
+ GV->setConstant(true);
+}
+
+
+// struct objc_module {
+// unsigned long version;
+// unsigned long size;
+// const char *name;
+// Symtab symtab;
+// };
+
+// FIXME: Get from somewhere
+static const int ModuleVersion = 7;
+
+void CGObjCMac::EmitModuleInfo() {
+ uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+
+ std::vector<llvm::Constant*> Values(4);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ // This used to be the filename, now it is unused. <rdr://4327263>
+ Values[2] = GetClassName(&CGM.getContext().Idents.get(""));
+ Values[3] = EmitModuleSymbols();
+ CreateMetadataVar("\01L_OBJC_MODULES",
+ llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ "__OBJC,__module_info,regular,no_dead_strip",
+ 4, true);
+}
+
+llvm::Constant *CGObjCMac::EmitModuleSymbols() {
+ unsigned NumClasses = DefinedClasses.size();
+ unsigned NumCategories = DefinedCategories.size();
+
+ // Return null if no symbols were defined.
+ if (!NumClasses && !NumCategories)
+ return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
+ Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
+ Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+
+ // The runtime expects exactly the list of defined classes followed
+ // by the list of defined categories, in a single array.
+ std::vector<llvm::Constant*> Symbols(NumClasses + NumCategories);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
+ ObjCTypes.Int8PtrTy);
+ for (unsigned i=0; i<NumCategories; i++)
+ Symbols[NumClasses + i] =
+ llvm::ConstantExpr::getBitCast(DefinedCategories[i],
+ ObjCTypes.Int8PtrTy);
+
+ Values[4] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ NumClasses + NumCategories),
+ Symbols);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
+ "__OBJC,__symbols,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+}
+
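+/// EmitClassRef - Return a Value holding the runtime class pointer for ID,
+/// loaded from a lazily-emitted pointer in the __cls_refs section. The
+/// class is also registered as a lazy symbol reference.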
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ LazySymbols.insert(ID->getIdentifier());
+
+ llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetClassName(ID->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
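+/// EmitSelector - Return a Value holding the selector for Sel, loaded from
+/// a lazily-emitted pointer in the __message_refs section.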
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
+llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = ClassNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantArray::get(Ident->getName()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+/// GetIvarLayoutName - Returns a unique constant for the given
+/// ivar layout bitmap.
+llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+}
+
+static QualType::GCAttrTypes GetGCAttrTypeForType(ASTContext &Ctx,
+ QualType FQT) {
+ if (FQT.isObjCGCStrong())
+ return QualType::Strong;
+
+ if (FQT.isObjCGCWeak())
+ return QualType::Weak;
+
+ if (Ctx.isObjCObjectPointerType(FQT))
+ return QualType::Strong;
+
+ if (const PointerType *PT = FQT->getAsPointerType())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+
+ return QualType::GCNone;
+}
+
+void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos,
+ bool ForStrongLayout,
+ bool &HasUnion) {
+ const RecordDecl *RD = RT->getDecl();
+ // FIXME - Use iterator.
+ llvm::SmallVector<FieldDecl*, 16> Fields(RD->field_begin(CGM.getContext()),
+ RD->field_end(CGM.getContext()));
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildAggrIvarLayout(0, RecLayout, RD, Fields, BytePos,
+ ForStrongLayout, HasUnion);
+}
+
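+/// BuildAggrIvarLayout - Accumulate GC layout information for the fields in
+/// RecFields, which are either the ivars of the implementation OI or, when
+/// recursing through an aggregate, the fields of the record RD laid out by
+/// Layout. Scanned words are appended to IvarsInfo and skipped words to
+/// SkipIvars; BytePos is the byte offset of this aggregate within the
+/// enclosing object, and HasUnion is set if a union is encountered.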
+void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion) {
+ bool IsUnion = (RD && RD->isUnion());
+ uint64_t MaxUnionIvarSize = 0;
+ uint64_t MaxSkippedUnionIvarSize = 0;
+ FieldDecl *MaxField = 0;
+ FieldDecl *MaxSkippedField = 0;
+ FieldDecl *LastFieldBitfield = 0;
+ uint64_t MaxFieldOffset = 0;
+ uint64_t MaxSkippedFieldOffset = 0;
+ uint64_t LastBitfieldOffset = 0;
+
+ if (RecFields.empty())
+ return;
+ unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ FieldDecl *Field = RecFields[i];
+ uint64_t FieldOffset;
+ if (RD)
+ FieldOffset =
+ Layout->getElementOffset(CGM.getTypes().getLLVMFieldNo(Field));
+ else
+ FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
+
+    // Skip over unnamed fields and bitfields.
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfield = Field;
+ LastBitfieldOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfield = 0;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildAggrIvarRecordLayout(FQT->getAsRecordType(),
+ BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+      const ConstantArrayType *CArray =
+        dyn_cast_or_null<ConstantArrayType>(Array);
+      assert(CArray && "only array with known element size is supported");
+      uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+        const ConstantArrayType *CArray =
+          dyn_cast_or_null<ConstantArrayType>(Array);
+        assert(CArray && "only array with known element size is supported");
+        ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+
+ assert(!FQT->isUnionType() &&
+ "layout for array of unions not supported");
+ if (FQT->isRecordType()) {
+ int OldIndex = IvarsInfo.size() - 1;
+        int OldSkIndex = SkipIvars.size() - 1;
+
+ const RecordType *RT = FQT->getAsRecordType();
+ BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = IvarsInfo.size() - 1,
+               FirstSkIndex = SkipIvars.size() - 1; ElIx < ElCount; ElIx++) {
+ uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits;
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx,
+ IvarsInfo[i].ivar_size));
+ for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i)
+ SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx,
+ SkipIvars[i].ivar_size));
+ }
+ continue;
+ }
+ }
+    // At this point we are done with records, unions, and arrays thereof.
+    // For other arrays we are down to their element type.
+ QualType::GCAttrTypes GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT);
+
+ unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType());
+ if ((ForStrongLayout && GCAttr == QualType::Strong)
+ || (!ForStrongLayout && GCAttr == QualType::Weak)) {
+ if (IsUnion) {
+ uint64_t UnionIvarSize = FieldSize / WordSizeInBits;
+ if (UnionIvarSize > MaxUnionIvarSize) {
+ MaxUnionIvarSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / WordSizeInBits));
+ }
+ } else if ((ForStrongLayout &&
+ (GCAttr == QualType::GCNone || GCAttr == QualType::Weak))
+ || (!ForStrongLayout && GCAttr != QualType::Weak)) {
+ if (IsUnion) {
+        // FIXME: Why the asymmetry? We divide by word size in bits on the
+        // other side.
+ uint64_t UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxSkippedUnionIvarSize) {
+ MaxSkippedUnionIvarSize = UnionIvarSize;
+ MaxSkippedField = Field;
+ MaxSkippedFieldOffset = FieldOffset;
+ }
+ } else {
+      // FIXME: Why the asymmetry? We divide by byte size in bits here.
+ SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / ByteSizeInBits));
+ }
+ }
+ }
+
+ if (LastFieldBitfield) {
+ // Last field was a bitfield. Must update skip info.
+ Expr *BitWidth = LastFieldBitfield->getBitWidth();
+ uint64_t BitFieldSize =
+ BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ GC_IVAR skivar;
+ skivar.ivar_bytepos = BytePos + LastBitfieldOffset;
+ skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+ + ((BitFieldSize % ByteSizeInBits) != 0);
+ SkipIvars.push_back(skivar);
+ }
+
+ if (MaxField)
+ IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset,
+ MaxUnionIvarSize));
+ if (MaxSkippedField)
+ SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset,
+ MaxSkippedUnionIvarSize));
+}
+
+/// BuildIvarLayout - Builds ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map shows which words in the ivar list must be skipped and
+/// which must be scanned by the GC (see below). The string is built of
+/// bytes. Each byte is divided into two nibbles (4 bits each): the left
+/// nibble is the count of words to skip and the right nibble is the count
+/// of words to scan, so each nibble represents up to 15 words to skip or
+/// scan. Skipping the rest is represented by a 0x00 byte, which also ends
+/// the string.
+/// 1. When ForStrongLayout is true, the following ivars are scanned:
+/// - id, Class
+/// - object *
+/// - __strong anything
+///
+/// 2. When ForStrongLayout is false, the following ivars are scanned:
+/// - __weak anything
+///
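+/// As a hypothetical example (assuming a 32-bit target where an int and a
+/// pointer each occupy one word), the strong layout for
+///   id a;        // scan 1 word
+///   int pad[2];  // skip 2 words
+///   id b, c;     // scan 2 words
+/// is the byte string 0x01 0x22 0x00: skip 0/scan 1, then skip 2/scan 2,
+/// then the terminating zero byte.
+///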
+llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
+ const ObjCImplementationDecl *OMD,
+ bool ForStrongLayout) {
+ bool hasUnion = false;
+
+ unsigned int WordsToScan, WordsToSkip;
+ const llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return llvm::Constant::getNullValue(PtrTy);
+
+ llvm::SmallVector<FieldDecl*, 32> RecFields;
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ CGM.getContext().CollectObjCIvars(OI, RecFields);
+
+ // Add this implementations synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CGM.getContext().CollectSynthesizedIvars(OI, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+
+ if (RecFields.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ SkipIvars.clear();
+ IvarsInfo.clear();
+
+ BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
+ if (IvarsInfo.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+  // Sort on byte position in case we encountered a union nested in
+  // the ivar list.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ // Build the string of skip/scan nibbles
+ llvm::SmallVector<SKIP_SCAN, 32> SkipScanIvars;
+ unsigned int WordSize =
+ CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+  WordsToSkip = IvarsInfo[0].ivar_bytepos / WordSize;
+  WordsToScan = IvarsInfo[0].ivar_size;
+ for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
+ unsigned int TailPrevGCObjC =
+ IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
+ if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
+ // consecutive 'scanned' object pointers.
+ WordsToScan += IvarsInfo[i].ivar_size;
+ } else {
+      // Skip over GC'able object pointers which overlap one another.
+ if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos)
+ continue;
+ // Must skip over 1 or more words. We save current skip/scan values
+ // and start a new pair.
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+
+ // Skip the hole.
+ SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[i].ivar_size;
+ }
+ }
+ if (WordsToScan > 0) {
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+ }
+
+ bool BytesSkipped = false;
+ if (!SkipIvars.empty()) {
+ unsigned int LastIndex = SkipIvars.size()-1;
+ int LastByteSkipped =
+ SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
+ LastIndex = IvarsInfo.size()-1;
+ int LastByteScanned =
+ IvarsInfo[LastIndex].ivar_bytepos +
+ IvarsInfo[LastIndex].ivar_size * WordSize;
+ BytesSkipped = (LastByteSkipped > LastByteScanned);
+ // Compute number of bytes to skip at the tail end of the last ivar scanned.
+ if (BytesSkipped) {
+ unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
+ SKIP_SCAN SkScan;
+ SkScan.skip = TotalWords - (LastByteScanned/WordSize);
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ }
+ }
+  // Mini optimization of nibbles: a 0xM0 byte followed by 0x0N is folded
+  // into the single byte 0xMN.
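+  // For example, the pair 0x30 0x02 (skip 3, then scan 2) folds into the
+  // single byte 0x32.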
+ int SkipScan = SkipScanIvars.size()-1;
+ for (int i = 0; i <= SkipScan; i++) {
+ if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0
+ && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) {
+ // 0xM0 followed by 0x0N detected.
+ SkipScanIvars[i].scan = SkipScanIvars[i+1].scan;
+ for (int j = i+1; j < SkipScan; j++)
+ SkipScanIvars[j] = SkipScanIvars[j+1];
+ --SkipScan;
+ }
+ }
+
+ // Generate the string.
+ std::string BitMap;
+ for (int i = 0; i <= SkipScan; i++) {
+ unsigned char byte;
+ unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
+ unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
+ unsigned int skip_big = SkipScanIvars[i].skip / 0xf;
+ unsigned int scan_big = SkipScanIvars[i].scan / 0xf;
+
+ if (skip_small > 0 || skip_big > 0)
+ BytesSkipped = true;
+ // first skip big.
+ for (unsigned int ix = 0; ix < skip_big; ix++)
+ BitMap += (unsigned char)(0xf0);
+
+ // next (skip small, scan)
+ if (skip_small) {
+ byte = skip_small << 4;
+ if (scan_big > 0) {
+ byte |= 0xf;
+ --scan_big;
+ } else if (scan_small) {
+ byte |= scan_small;
+ scan_small = 0;
+ }
+ BitMap += byte;
+ }
+ // next scan big
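+/// CreateMetadataVar - Create an internal global variable initialized with
+/// Init, optionally placed in Section and aligned to Align. If AddToUsed is
+/// set, the variable is recorded in UsedGlobals so that FinishModule keeps
+/// it alive via llvm.used.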
+ for (unsigned int ix = 0; ix < scan_big; ix++)
+ BitMap += (unsigned char)(0x0f);
+ // last scan small
+ if (scan_small) {
+ byte = scan_small;
+ BitMap += byte;
+ }
+ }
+ // null terminate string.
+ unsigned char zero = 0;
+ BitMap += zero;
+
+ if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ printf("\n%s ivar layout for class '%s': ",
+ ForStrongLayout ? "strong" : "weak",
+ OMD->getClassInterface()->getNameAsCString());
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0; i < BitMap.size(); i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+
+  // If the ivar_layout bitmap contains no skip entries (nothing skipped),
+  // use null as the final layout.
+ if (ForStrongLayout && !BytesSkipped)
+ return llvm::Constant::getNullValue(PtrTy);
+  llvm::GlobalVariable *Entry =
+    CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+                      llvm::ConstantArray::get(BitMap.c_str()),
+                      "__TEXT,__cstring,cstring_literals",
+                      1, true);
+ return getConstantGEP(Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
+ llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
+
+ // FIXME: Avoid std::string copying.
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
+ llvm::ConstantArray::get(Sel.getAsString()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
+ return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID));
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(const std::string &Name) {
+ return GetMethodVarName(&CGM.getContext().Idents.get(Name));
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantArray::get(TypeStr),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(const_cast<ObjCMethodDecl*>(D),
+ TypeStr);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantArray::get(TypeStr),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = PropertyNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_",
+ llvm::ConstantArray::get(Ident->getName()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+// FIXME: This Decl should be more precise.
+llvm::Constant *
+ CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
+}
+
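+/// GetNameForMethod - Compute the symbol name for a method definition,
+/// e.g. "\01-[MyClass(MyCategory) doSomething:]" for a (hypothetical)
+/// category instance method; class methods use '+' instead of '-'.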
+void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
+ const ObjCContainerDecl *CD,
+ std::string &NameOut) {
+ NameOut = '\01';
+ NameOut += (D->isInstanceMethod() ? '-' : '+');
+ NameOut += '[';
+ assert (CD && "Missing container decl in GetNameForMethod");
+ NameOut += CD->getNameAsString();
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext())) {
+ NameOut += '(';
+ NameOut += CID->getNameAsString();
+    NameOut += ')';
+ }
+ NameOut += ' ';
+ NameOut += D->getSelector().getAsString();
+ NameOut += ']';
+}
+
+void CGObjCMac::FinishModule() {
+ EmitModuleInfo();
+
+ // Emit the dummy bodies for any protocols which were referenced but
+ // never defined.
+ for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
+ i = Protocols.begin(), e = Protocols.end(); i != e; ++i) {
+ if (i->second->hasInitializer())
+ continue;
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ Values[1] = GetClassName(i->first);
+ Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[3] = Values[4] =
+ llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+ i->second->setLinkage(llvm::GlobalValue::InternalLinkage);
+ i->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values));
+ }
+
+ std::vector<llvm::Constant*> Used;
+ for (std::vector<llvm::GlobalVariable*>::iterator i = UsedGlobals.begin(),
+ e = UsedGlobals.end(); i != e; ++i) {
+ Used.push_back(llvm::ConstantExpr::getBitCast(*i, ObjCTypes.Int8PtrTy));
+ }
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy, Used.size());
+ llvm::GlobalValue *GV =
+ new llvm::GlobalVariable(AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Used),
+ "llvm.used",
+ &CGM.getModule());
+
+ GV->setSection("llvm.metadata");
+
+ // Add assembler directives to add lazy undefined symbol references
+ // for classes which are referenced but not defined. This is
+ // important for correct linker interaction.
+
+ // FIXME: Uh, this isn't particularly portable.
+ std::stringstream s;
+
+ if (!CGM.getModule().getModuleInlineAsm().empty())
+ s << "\n";
+
+ for (std::set<IdentifierInfo*>::iterator i = LazySymbols.begin(),
+ e = LazySymbols.end(); i != e; ++i) {
+ s << "\t.lazy_reference .objc_class_name_" << (*i)->getName() << "\n";
+ }
+ for (std::set<IdentifierInfo*>::iterator i = DefinedSymbols.begin(),
+ e = DefinedSymbols.end(); i != e; ++i) {
+ s << "\t.objc_class_name_" << (*i)->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << (*i)->getName() << "\n";
+ }
+
+ CGM.getModule().appendModuleInlineAsm(s.str());
+}
+
+CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
+ : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm)
+{
+ ObjCEmptyCacheVar = ObjCEmptyVtableVar = NULL;
+ ObjCABI = 2;
+}
+
+/* *** */
+
+ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
+: CGM(cgm)
+{
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ ShortTy = Types.ConvertType(Ctx.ShortTy);
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ LongTy = Types.ConvertType(Ctx.LongTy);
+ LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ Int8PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+
+ ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
+ PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+
+ // FIXME: It would be nice to unify this with the opaque type, so that the IR
+ // comes out a bit cleaner.
+ const llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+
+ // I'm not sure I like this. The implicit coordination is a bit
+ // gross. We should solve this in a reasonable fashion because this
+ // is a pretty common task (match some runtime data structure with
+ // an LLVM data structure).
+
+ // FIXME: This is leaked.
+ // FIXME: Merge with rewriter code?
+
+ // struct _objc_super {
+ // id self;
+ // Class cls;
+ // }
+ RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct, 0,
+ SourceLocation(),
+ &Ctx.Idents.get("_objc_super"));
+ RD->addDecl(Ctx, FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCIdType(), 0, false));
+ RD->addDecl(Ctx, FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCClassType(), 0, false));
+ RD->completeDefinition(Ctx);
+
+ SuperCTy = Ctx.getTagDeclType(RD);
+ SuperPtrCTy = Ctx.getPointerType(SuperCTy);
+
+ SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
+ SuperPtrTy = llvm::PointerType::getUnqual(SuperTy);
+
+ // struct _prop_t {
+ // char *name;
+ // char *attributes;
+ // }
+ PropertyTy = llvm::StructType::get(Int8PtrTy, Int8PtrTy, NULL);
+ CGM.getModule().addTypeName("struct._prop_t",
+ PropertyTy);
+
+ // struct _prop_list_t {
+ // uint32_t entsize; // sizeof(struct _prop_t)
+ // uint32_t count_of_properties;
+ // struct _prop_t prop_list[count_of_properties];
+ // }
+ PropertyListTy = llvm::StructType::get(IntTy,
+ IntTy,
+ llvm::ArrayType::get(PropertyTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._prop_list_t",
+ PropertyListTy);
+ // struct _prop_list_t *
+ PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
+
+ // struct _objc_method {
+ // SEL _cmd;
+ // char *method_type;
+ // char *_imp;
+ // }
+ MethodTy = llvm::StructType::get(SelectorPtrTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method", MethodTy);
+
+ // struct _objc_cache *
+ CacheTy = llvm::OpaqueType::get();
+ CGM.getModule().addTypeName("struct._objc_cache", CacheTy);
+ CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+}
+
+ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm)
+{
+ // struct _objc_method_description {
+ // SEL name;
+ // char *types;
+ // }
+ MethodDescriptionTy =
+ llvm::StructType::get(SelectorPtrTy,
+ Int8PtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method_description",
+ MethodDescriptionTy);
+
+ // struct _objc_method_description_list {
+ // int count;
+ // struct _objc_method_description[1];
+ // }
+ MethodDescriptionListTy =
+ llvm::StructType::get(IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method_description_list",
+ MethodDescriptionListTy);
+
+ // struct _objc_method_description_list *
+ MethodDescriptionListPtrTy =
+ llvm::PointerType::getUnqual(MethodDescriptionListTy);
+
+ // Protocol description structures
+
+ // struct _objc_protocol_extension {
+ // uint32_t size; // sizeof(struct _objc_protocol_extension)
+ // struct _objc_method_description_list *optional_instance_methods;
+ // struct _objc_method_description_list *optional_class_methods;
+ // struct _objc_property_list *instance_properties;
+ // }
+ ProtocolExtensionTy =
+ llvm::StructType::get(IntTy,
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_protocol_extension",
+ ProtocolExtensionTy);
+
+ // struct _objc_protocol_extension *
+ ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
+
+ // Handle recursive construction of Protocol and ProtocolList types
+
+ llvm::PATypeHolder ProtocolTyHolder = llvm::OpaqueType::get();
+ llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get();
+
+ const llvm::Type *T =
+ llvm::StructType::get(llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ LongTy,
+ llvm::ArrayType::get(ProtocolTyHolder, 0),
+ NULL);
+ cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(T);
+
+ // struct _objc_protocol {
+ // struct _objc_protocol_extension *isa;
+ // char *protocol_name;
+ // struct _objc_protocol **_objc_protocol_list;
+ // struct _objc_method_description_list *instance_methods;
+ // struct _objc_method_description_list *class_methods;
+ // }
+ T = llvm::StructType::get(ProtocolExtensionPtrTy,
+ Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ NULL);
+ cast<llvm::OpaqueType>(ProtocolTyHolder.get())->refineAbstractTypeTo(T);
+
+ ProtocolListTy = cast<llvm::StructType>(ProtocolListTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_protocol_list",
+ ProtocolListTy);
+ // struct _objc_protocol_list *
+ ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
+
+ ProtocolTy = cast<llvm::StructType>(ProtocolTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_protocol", ProtocolTy);
+ ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
+
+ // Class description structures
+
+ // struct _objc_ivar {
+ // char *ivar_name;
+ // char *ivar_type;
+ // int ivar_offset;
+ // }
+ IvarTy = llvm::StructType::get(Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_ivar", IvarTy);
+
+ // struct _objc_ivar_list *
+ IvarListTy = llvm::OpaqueType::get();
+ CGM.getModule().addTypeName("struct._objc_ivar_list", IvarListTy);
+ IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
+
+ // struct _objc_method_list *
+ MethodListTy = llvm::OpaqueType::get();
+ CGM.getModule().addTypeName("struct._objc_method_list", MethodListTy);
+ MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
+
+ // struct _objc_class_extension *
+ ClassExtensionTy =
+ llvm::StructType::get(IntTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_class_extension", ClassExtensionTy);
+ ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
+
+ llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get();
+
+ // struct _objc_class {
+ // Class isa;
+ // Class super_class;
+ // char *name;
+ // long version;
+ // long info;
+ // long instance_size;
+ // struct _objc_ivar_list *ivars;
+ // struct _objc_method_list *methods;
+ // struct _objc_cache *cache;
+ // struct _objc_protocol_list *protocols;
+ // char *ivar_layout;
+ // struct _objc_class_ext *ext;
+ // };
+ T = llvm::StructType::get(llvm::PointerType::getUnqual(ClassTyHolder),
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ NULL);
+ cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(T);
+
+ ClassTy = cast<llvm::StructType>(ClassTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_class", ClassTy);
+ ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
+
+  // struct _objc_category {
+  // char *category_name;
+  // char *class_name;
+  // struct _objc_method_list *instance_methods;
+  // struct _objc_method_list *class_methods;
+  // struct _objc_protocol_list *protocols;
+  // uint32_t size; // sizeof(struct _objc_category)
+  // struct _objc_property_list *instance_properties; // category's @property
+  // }
+ CategoryTy = llvm::StructType::get(Int8PtrTy,
+ Int8PtrTy,
+ MethodListPtrTy,
+ MethodListPtrTy,
+ ProtocolListPtrTy,
+ IntTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_category", CategoryTy);
+
+ // Global metadata structures
+
+ // struct _objc_symtab {
+ // long sel_ref_cnt;
+ // SEL *refs;
+ // short cls_def_cnt;
+ // short cat_def_cnt;
+ // char *defs[cls_def_cnt + cat_def_cnt];
+ // }
+ SymtabTy = llvm::StructType::get(LongTy,
+ SelectorPtrTy,
+ ShortTy,
+ ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_symtab", SymtabTy);
+ SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
+
+ // struct _objc_module {
+ // long version;
+ // long size; // sizeof(struct _objc_module)
+ // char *name;
+ // struct _objc_symtab* symtab;
+ // }
+ ModuleTy =
+ llvm::StructType::get(LongTy,
+ LongTy,
+ Int8PtrTy,
+ SymtabPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_module", ModuleTy);
+
+
+ // FIXME: This is the size of the setjmp buffer and should be target
+ // specific. 18 is what's used on 32-bit X86.
+ uint64_t SetJmpBufferSize = 18;
+
+ // Exceptions
+ const llvm::Type *StackPtrTy =
+ llvm::ArrayType::get(llvm::PointerType::getUnqual(llvm::Type::Int8Ty), 4);
+
+ ExceptionDataTy =
+ llvm::StructType::get(llvm::ArrayType::get(llvm::Type::Int32Ty,
+ SetJmpBufferSize),
+ StackPtrTy, NULL);
+ CGM.getModule().addTypeName("struct._objc_exception_data",
+ ExceptionDataTy);
+
+}
+
+ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
+: ObjCCommonTypesHelper(cgm)
+{
+ // struct _method_list_t {
+ // uint32_t entsize; // sizeof(struct _objc_method)
+ // uint32_t method_count;
+ // struct _objc_method method_list[method_count];
+ // }
+ MethodListnfABITy = llvm::StructType::get(IntTy,
+ IntTy,
+ llvm::ArrayType::get(MethodTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct.__method_list_t",
+ MethodListnfABITy);
+ // struct method_list_t *
+ MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
+
+ // struct _protocol_t {
+ // id isa; // NULL
+ // const char * const protocol_name;
+ // const struct _protocol_list_t * protocol_list; // super protocols
+ // const struct method_list_t * const instance_methods;
+ // const struct method_list_t * const class_methods;
+ // const struct method_list_t *optionalInstanceMethods;
+ // const struct method_list_t *optionalClassMethods;
+ // const struct _prop_list_t * properties;
+ // const uint32_t size; // sizeof(struct _protocol_t)
+ // const uint32_t flags; // = 0
+ // }
+
+ // Holder for struct _protocol_list_t *
+ llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get();
+
+ ProtocolnfABITy = llvm::StructType::get(ObjectPtrTy,
+ Int8PtrTy,
+ llvm::PointerType::getUnqual(
+ ProtocolListTyHolder),
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ PropertyListPtrTy,
+ IntTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._protocol_t",
+ ProtocolnfABITy);
+
+ // struct _protocol_t*
+ ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
+
+ // struct _protocol_list_t {
+ // long protocol_count; // Note, this is 32/64 bit
+ // struct _protocol_t *[protocol_count];
+ // }
+ ProtocolListnfABITy = llvm::StructType::get(LongTy,
+ llvm::ArrayType::get(
+ ProtocolnfABIPtrTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_protocol_list",
+ ProtocolListnfABITy);
+ cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(
+ ProtocolListnfABITy);
+
+ // struct _objc_protocol_list*
+ ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
+
+ // struct _ivar_t {
+ // unsigned long int *offset; // pointer to ivar offset location
+ // char *name;
+ // char *type;
+ // uint32_t alignment;
+ // uint32_t size;
+ // }
+ IvarnfABITy = llvm::StructType::get(llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._ivar_t", IvarnfABITy);
+
+ // struct _ivar_list_t {
+ // uint32 entsize; // sizeof(struct _ivar_t)
+ // uint32 count;
+  //   struct _ivar_t list[count];
+ // }
+ IvarListnfABITy = llvm::StructType::get(IntTy,
+ IntTy,
+ llvm::ArrayType::get(
+ IvarnfABITy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._ivar_list_t", IvarListnfABITy);
+
+ IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
+
+ // struct _class_ro_t {
+ // uint32_t const flags;
+ // uint32_t const instanceStart;
+ // uint32_t const instanceSize;
+ // uint32_t const reserved; // only when building for 64bit targets
+ // const uint8_t * const ivarLayout;
+ // const char *const name;
+ // const struct _method_list_t * const baseMethods;
+ // const struct _objc_protocol_list *const baseProtocols;
+ // const struct _ivar_list_t *const ivars;
+ // const uint8_t * const weakIvarLayout;
+ // const struct _prop_list_t * const properties;
+ // }
+
+  // FIXME: Add the 'reserved' field in 64-bit ABI mode!
+ ClassRonfABITy = llvm::StructType::get(IntTy,
+ IntTy,
+ IntTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._class_ro_t",
+ ClassRonfABITy);
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ ImpnfABITy = llvm::PointerType::getUnqual(
+ llvm::FunctionType::get(ObjectPtrTy, Params, false));
+
+ // struct _class_t {
+ // struct _class_t *isa;
+ // struct _class_t * const superclass;
+ // void *cache;
+ // IMP *vtable;
+ // struct class_ro_t *ro;
+ // }
+
+ llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get();
+ ClassnfABITy = llvm::StructType::get(llvm::PointerType::getUnqual(ClassTyHolder),
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(
+ ClassRonfABITy),
+ NULL);
+ CGM.getModule().addTypeName("struct._class_t", ClassnfABITy);
+
+ cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(
+ ClassnfABITy);
+
+ // LLVM for struct _class_t *
+ ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
+
+ // struct _category_t {
+ // const char * const name;
+ // struct _class_t *const cls;
+ // const struct _method_list_t * const instance_methods;
+ // const struct _method_list_t * const class_methods;
+ // const struct _protocol_list_t * const protocols;
+ // const struct _prop_list_t * const properties;
+ // }
+ CategorynfABITy = llvm::StructType::get(Int8PtrTy,
+ ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._category_t", CategorynfABITy);
+
+  // New types for non-fragile ABI messaging.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+
+ // First the clang type for struct _message_ref_t
+ RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct, 0,
+ SourceLocation(),
+ &Ctx.Idents.get("_message_ref_t"));
+ RD->addDecl(Ctx, FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.VoidPtrTy, 0, false));
+ RD->addDecl(Ctx, FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCSelType(), 0, false));
+ RD->completeDefinition(Ctx);
+
+ MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
+ MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy);
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ SuperMessageRefTy = llvm::StructType::get(ImpnfABITy,
+ SelectorPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._super_message_ref_t", SuperMessageRefTy);
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
+
+ // struct objc_typeinfo {
+ // const void** vtable; // objc_ehtype_vtable + 2
+ // const char* name; // c++ typeinfo string
+ // Class cls;
+ // };
+ EHTypeTy = llvm::StructType::get(llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy,
+ ClassnfABIPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_typeinfo", EHTypeTy);
+ EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
+}
+
+llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
+ FinishNonFragileABIModule();
+
+ return NULL;
+}
+
+void CGObjCNonFragileABIMac::AddModuleClassList(const
+ std::vector<llvm::GlobalValue*>
+ &Container,
+ const char *SymbolName,
+ const char *SectionName) {
+ unsigned NumClasses = Container.size();
+
+ if (!NumClasses)
+ return;
+
+ std::vector<llvm::Constant*> Symbols(NumClasses);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
+ ObjCTypes.Int8PtrTy);
+ llvm::Constant* Init =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ NumClasses),
+ Symbols);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ SymbolName,
+ &CGM.getModule());
+ GV->setAlignment(8);
+ GV->setSection(SectionName);
+ UsedGlobals.push_back(GV);
+}
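+
+// Sketch of the output (for a hypothetical module defining classes Foo
+// and Bar, passed as DefinedClasses):
+//   @"\01L_OBJC_LABEL_CLASS_$" = internal global [2 x i8*]
+//     [i8* bitcast (@"OBJC_CLASS_$_Foo" to i8*),
+//      i8* bitcast (@"OBJC_CLASS_$_Bar" to i8*)],
+//     section "__DATA, __objc_classlist, regular, no_dead_strip", align 8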
+
+void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
+  // The non-fragile ABI has no module definition.
+
+ // Build list of all implemented class addresses in array
+ // L_OBJC_LABEL_CLASS_$.
+ AddModuleClassList(DefinedClasses,
+ "\01L_OBJC_LABEL_CLASS_$",
+ "__DATA, __objc_classlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyClasses,
+ "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+ "__DATA, __objc_nlclslist, regular, no_dead_strip");
+
+ // Build list of all implemented category addresses in array
+ // L_OBJC_LABEL_CATEGORY_$.
+ AddModuleClassList(DefinedCategories,
+ "\01L_OBJC_LABEL_CATEGORY_$",
+ "__DATA, __objc_catlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyCategories,
+ "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+ "__DATA, __objc_nlcatlist, regular, no_dead_strip");
+
+ // static int L_OBJC_IMAGE_INFO[2] = { 0, flags };
+  // FIXME: flags can be 0 | 1 | 2 | 6. For now just use 0.
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, 0);
+ unsigned int flags = 0;
+ // FIXME: Fix and continue?
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+ flags |= eImageInfo_GarbageCollected;
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ flags |= eImageInfo_GCOnly;
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ llvm::Constant* Init = llvm::ConstantArray::get(
+ llvm::ArrayType::get(ObjCTypes.IntTy, 2),
+ Values);
+ llvm::GlobalVariable *IMGV =
+ new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ "\01L_OBJC_IMAGE_INFO",
+ &CGM.getModule());
+ IMGV->setSection("__DATA, __objc_imageinfo, regular, no_dead_strip");
+ IMGV->setConstant(true);
+ UsedGlobals.push_back(IMGV);
+
+ std::vector<llvm::Constant*> Used;
+
+ for (std::vector<llvm::GlobalVariable*>::iterator i = UsedGlobals.begin(),
+ e = UsedGlobals.end(); i != e; ++i) {
+ Used.push_back(llvm::ConstantExpr::getBitCast(*i, ObjCTypes.Int8PtrTy));
+ }
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy, Used.size());
+ llvm::GlobalValue *GV =
+ new llvm::GlobalVariable(AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Used),
+ "llvm.used",
+ &CGM.getModule());
+
+ GV->setSection("llvm.metadata");
+}
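+
+// For example, taking the 0 | 1 | 2 | 6 note above at face value
+// (i.e. eImageInfo_GarbageCollected == 2 and eImageInfo_GCOnly == 4):
+// non-GC code gets image info { 0, 0 }, -fobjc-gc gets { 0, 2 }, and
+// -fobjc-gc-only gets { 0, 6 }.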
+
+/// LegacyDispatchedSelector - Returns true if Sel is not in the list of
+/// NonLegacyDispatchMethods; false otherwise. That is, except for the 19
+/// selectors in the list, we generate a legacy (32-bit-style) message
+/// dispatch call for everything else.
+///
+bool CGObjCNonFragileABIMac::LegacyDispatchedSelector(Selector Sel) {
+ if (NonLegacyDispatchMethods.empty()) {
+ NonLegacyDispatchMethods.insert(GetNullarySelector("alloc"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("class"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("self"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("isFlipped"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("length"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("count"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("retain"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("release"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("autorelease"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("hash"));
+
+ NonLegacyDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("objectForKey"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isEqual"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("addObject"));
+ // "countByEnumeratingWithState:objects:count"
+ IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ NonLegacyDispatchMethods.insert(
+ CGM.getContext().Selectors.getSelector(3, KeyIdents));
+ }
+ return (NonLegacyDispatchMethods.count(Sel) == 0);
+}
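+
+// E.g. LegacyDispatchedSelector(@selector(retain)) is false, since retain
+// is in the list and so uses the new fixup-based dispatch, while a
+// selector not in the list, e.g. @selector(description), returns true and
+// takes the legacy objc_msgSend path.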
+
+// Metadata flags
+enum MetaDataFlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20
+};
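+// For example, a root class compiled with hidden visibility gets
+// CLS | CLS_ROOT | OBJC2_CLS_HIDDEN == 0x12, and its metaclass gets
+// CLS_META | CLS_ROOT | OBJC2_CLS_HIDDEN == 0x13.
+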
+/// BuildClassRoTInitializer - generate meta-data for:
+/// struct _class_ro_t {
+/// uint32_t const flags;
+/// uint32_t const instanceStart;
+/// uint32_t const instanceSize;
+/// uint32_t const reserved; // only when building for 64bit targets
+/// const uint8_t * const ivarLayout;
+/// const char *const name;
+/// const struct _method_list_t * const baseMethods;
+/// const struct _protocol_list_t *const baseProtocols;
+/// const struct _ivar_list_t *const ivars;
+/// const uint8_t * const weakIvarLayout;
+/// const struct _prop_list_t * const properties;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
+ unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+ Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
+ Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
+  // FIXME: For 64-bit targets, add 0 here.
+ Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, true);
+ Values[ 4] = GetClassName(ID->getIdentifier());
+ // const struct _method_list_t * const baseMethods;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName("\01l_OBJC_$_");
+ if (flags & CLS_META) {
+ MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(CGM.getContext()),
+ e = ID->classmeth_end(CGM.getContext()); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ } else {
+ MethodListName += "INSTANCE_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(CGM.getContext()),
+ e = ID->instmeth_end(CGM.getContext()); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(CGM.getContext()),
+ e = ID->propimpl_end(CGM.getContext()); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+      if (PID->getPropertyImplementation() ==
+            ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ }
+ }
+ }
+ Values[ 5] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const", Methods);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+  assert(OID &&
+         "CGObjCNonFragileABIMac::BuildClassRoTInitializer - null interface");
+ Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ + OID->getNameAsString(),
+ OID->protocol_begin(),
+ OID->protocol_end());
+
+ if (flags & CLS_META)
+ Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ else
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, false);
+ if (flags & CLS_META)
+ Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ else
+ Values[ 9] =
+ EmitPropertyList(
+ "\01l_OBJC_$_PROP_LIST_" + ID->getNameAsString(),
+ ID, ID->getClassInterface(), ObjCTypes);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
+ Values);
+ llvm::GlobalVariable *CLASS_RO_GV =
+ new llvm::GlobalVariable(ObjCTypes.ClassRonfABITy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ (flags & CLS_META) ?
+ std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
+ std::string("\01l_OBJC_CLASS_RO_$_")+ClassName,
+ &CGM.getModule());
+ CLASS_RO_GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ClassRonfABITy));
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
+  return CLASS_RO_GV;
+}
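+
+// E.g. for @implementation Foo this emits the internal global
+// \01l_OBJC_CLASS_RO_$_Foo (or \01l_OBJC_METACLASS_RO_$_Foo when CLS_META
+// is set) in __DATA, __objc_const.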
+
+/// BuildClassMetaData - This routine defines the top-level meta-data
+/// for the given ClassName:
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t * const superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct class_ro_t *ro;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
+ std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility) {
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = IsAGV;
+ Values[1] = SuperClassGV
+ ? SuperClassGV
+ : llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+ Values[2] = ObjCEmptyCacheVar; // &ObjCEmptyCacheVar
+ Values[3] = ObjCEmptyVtableVar; // &ObjCEmptyVtableVar
+ Values[4] = ClassRoGV; // &CLASS_RO_GV
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
+ Values);
+ llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
+ GV->setInitializer(Init);
+ GV->setSection("__DATA, __objc_data");
+ GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ClassnfABITy));
+ if (HiddenVisibility)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return GV;
+}
+
+bool
+CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ return OD->getClassMethod(CGM.getContext(), GetNullarySelector("load")) != 0;
+}
+
+void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize) {
+ const ASTRecordLayout &RL =
+ CGM.getContext().getASTObjCImplementationLayout(OID);
+
+ // InstanceSize is really instance end.
+ InstanceSize = llvm::RoundUpToAlignment(RL.getNextOffset(), 8) / 8;
+
+ // If there are no fields, the start is the same as the end.
+ if (!RL.getFieldCount())
+ InstanceStart = InstanceSize;
+ else
+ InstanceStart = RL.getFieldOffset(0) / 8;
+}
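+
+// Worked example (hypothetical layout): if the first field sits at bit
+// offset 64 and the next free offset is 160 bits, then
+// InstanceStart = 64/8 = 8 bytes and InstanceSize = RoundUp(160, 8)/8 = 20
+// bytes.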
+
+void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ if (!ObjCEmptyCacheVar) {
+ ObjCEmptyCacheVar = new llvm::GlobalVariable(
+ ObjCTypes.CacheTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_cache",
+ &CGM.getModule());
+
+ ObjCEmptyVtableVar = new llvm::GlobalVariable(
+ ObjCTypes.ImpnfABITy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_vtable",
+ &CGM.getModule());
+ }
+ assert(ID->getClassInterface() &&
+ "CGObjCNonFragileABIMac::GenerateClass - class is 0");
+ // FIXME: Is this correct (that meta class size is never computed)?
+ uint32_t InstanceStart =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ uint32_t InstanceSize = InstanceStart;
+ uint32_t flags = CLS_META;
+ std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
+ std::string ObjCClassName(getClassSymbolPrefix());
+
+ llvm::GlobalVariable *SuperClassGV, *IsAGV;
+
+ bool classIsHidden =
+ CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (!ID->getClassInterface()->getSuperClass()) {
+ // class is root
+ flags |= CLS_ROOT;
+ SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
+ IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
+ } else {
+ // Has a root. Current class is not a root.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
+    // Work out the superclass metadata symbol.
+ std::string SuperClassName =
+ ObjCMetaClassName + ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(SuperClassName);
+ }
+ llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,ID);
+ std::string TClassName = ObjCMetaClassName + ClassName;
+ llvm::GlobalVariable *MetaTClass =
+ BuildClassMetaData(TClassName, IsAGV, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+
+ // Metadata for the class
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+
+ if (hasObjCExceptionAttribute(ID->getClassInterface()))
+ flags |= CLS_EXCEPTION;
+
+ if (!ID->getClassInterface()->getSuperClass()) {
+ flags |= CLS_ROOT;
+ SuperClassGV = 0;
+ } else {
+ // Has a root. Current class is not a root.
+    std::string SuperClassName =
+      ID->getClassInterface()->getSuperClass()->getNameAsString();
+    SuperClassGV = GetClassGlobal(ObjCClassName + SuperClassName);
+ }
+ GetClassSizeInfo(ID, InstanceStart, InstanceSize);
+ CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,
+ ID);
+
+ TClassName = ObjCClassName + ClassName;
+ llvm::GlobalVariable *ClassMD =
+ BuildClassMetaData(TClassName, MetaTClass, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedClasses.push_back(ClassMD);
+
+ // Determine if this class is also "non-lazy".
+ if (ImplementationIsNonLazy(ID))
+ DefinedNonLazyClasses.push_back(ClassMD);
+
+ // Force the definition of the EHType if necessary.
+ if (flags & CLS_EXCEPTION)
+ GetInterfaceEHType(ID->getClassInterface(), true);
+}
+
+/// GenerateProtocolRef - This routine is called to generate code for
+/// a protocol reference expression; as in:
+/// @code
+/// @protocol(Proto1);
+/// @endcode
+/// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1
+/// which will hold the address of the protocol meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+
+  // This routine is called for @protocol only, so we must build the
+  // definition of the protocol's meta-data (not just a reference to it!).
+ //
+ llvm::Constant *Init = llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
+ ObjCTypes.ExternalProtocolPtrTy);
+
+ std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+ ProtocolName += PD->getNameAsCString();
+
+ llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
+ if (PTGV)
+ return Builder.CreateLoad(PTGV, false, "tmp");
+ PTGV = new llvm::GlobalVariable(
+ Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ProtocolName,
+ &CGM.getModule());
+ PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ UsedGlobals.push_back(PTGV);
+ return Builder.CreateLoad(PTGV, false, "tmp");
+}
+
+/// GenerateCategory - Build metadata for a category implementation.
+/// struct _category_t {
+/// const char * const name;
+/// struct _class_t *const cls;
+/// const struct _method_list_t * const instance_methods;
+/// const struct _method_list_t * const class_methods;
+/// const struct _protocol_list_t * const protocols;
+/// const struct _prop_list_t * const properties;
+/// }
+///
+void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const char *Prefix = "\01l_OBJC_$_CATEGORY_";
+ std::string ExtCatName(Prefix + Interface->getNameAsString()+
+ "_$_" + OCD->getNameAsString());
+ std::string ExtClassName(getClassSymbolPrefix() +
+ Interface->getNameAsString());
+
+ std::vector<llvm::Constant*> Values(6);
+ Values[0] = GetClassName(OCD->getIdentifier());
+  // Class entry symbol (the category's cls field points at the class).
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
+ Values[1] = ClassGV;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName(Prefix);
+ MethodListName += "INSTANCE_METHODS_" + Interface->getNameAsString() +
+ "_$_" + OCD->getNameAsString();
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(CGM.getContext()),
+ e = OCD->instmeth_end(CGM.getContext()); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[2] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+
+ MethodListName = Prefix;
+ MethodListName += "CLASS_METHODS_" + Interface->getNameAsString() + "_$_" +
+ OCD->getNameAsString();
+ Methods.clear();
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(CGM.getContext()),
+ e = OCD->classmeth_end(CGM.getContext()); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[3] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ if (Category) {
+ std::string ExtName(Interface->getNameAsString() + "_$_" +
+ OCD->getNameAsString());
+ Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ + Interface->getNameAsString() + "_$_"
+ + Category->getNameAsString(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ Values[5] =
+ EmitPropertyList(std::string("\01l_OBJC_$_PROP_LIST_") + ExtName,
+ OCD, Category, ObjCTypes);
+  } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+ Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
+ Values);
+ llvm::GlobalVariable *GCATV
+ = new llvm::GlobalVariable(ObjCTypes.CategorynfABITy,
+ false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ ExtCatName,
+ &CGM.getModule());
+ GCATV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.CategorynfABITy));
+ GCATV->setSection("__DATA, __objc_const");
+ UsedGlobals.push_back(GCATV);
+ DefinedCategories.push_back(GCATV);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(OCD))
+ DefinedNonLazyCategories.push_back(GCATV);
+}
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
+ const ObjCMethodDecl *MD) {
+ // FIXME: Use DenseMap::lookup
+ llvm::Function *Fn = MethodDefinitions[MD];
+ if (!Fn)
+ return 0;
+
+ std::vector<llvm::Constant*> Method(3);
+ Method[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Method[1] = GetMethodVarType(MD);
+ Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+/// EmitMethodList - Build meta-data for method declarations
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+///
+llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(
+ const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ // sizeof(struct _objc_method)
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ // method_count
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name,
+ &CGM.getModule());
+ GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ GV->setSection(Section);
+ UsedGlobals.push_back(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodListnfABIPtrTy);
+}
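+
+// Sketch of the result (hypothetical list of two methods; entsize is 24
+// bytes for the three pointer-sized fields on a 64-bit target):
+//   @"\01l_OBJC_$_INSTANCE_METHODS_Foo" = internal global
+//     { i32 24, i32 2, [2 x %struct._objc_method] [...] },
+//     section "__DATA, __objc_const"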
+
+/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+/// the given ivar.
+llvm::GlobalVariable * CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ // FIXME: We shouldn't need to do this lookup.
+ unsigned Index;
+ const ObjCInterfaceDecl *Container =
+ FindIvarInterface(CGM.getContext(), ID, Ivar, Index);
+ assert(Container && "Unable to find ivar container!");
+ std::string Name = "OBJC_IVAR_$_" + Container->getNameAsString() +
+ '.' + Ivar->getNameAsString();
+ llvm::GlobalVariable *IvarOffsetGV =
+ CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV)
+ IvarOffsetGV =
+ new llvm::GlobalVariable(ObjCTypes.LongTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name,
+ &CGM.getModule());
+ return IvarOffsetGV;
+}
+
+llvm::Constant * CGObjCNonFragileABIMac::EmitIvarOffsetVar(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int Offset) {
+ llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
+ IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
+ Offset));
+ IvarOffsetGV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.LongTy));
+
+  // FIXME: This matches gcc, but shouldn't the visibility be set on the use
+  // as well (i.e., in ObjCIvarOffsetVariable)?
+ if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+ Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+ CGM.getDeclVisibilityMode(ID) == LangOptions::Hidden)
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ else
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ IvarOffsetGV->setSection("__DATA, __objc_const");
+ return IvarOffsetGV;
+}
+
+/// EmitIvarList - Emit the ivar list for the given
+/// implementation. The return value has type
+/// IvarListnfABIPtrTy.
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// char *name;
+/// char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+/// struct _ivar_list_t {
+/// uint32 entsize; // sizeof(struct _ivar_t)
+/// uint32 count;
+///   struct _ivar_t list[count];
+/// }
+///
+
+void CGObjCCommonMac::GetNamedIvarList(const ObjCInterfaceDecl *OID,
+ llvm::SmallVector<ObjCIvarDecl*, 16> &Res) const {
+ for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
+ E = OID->ivar_end(); I != E; ++I) {
+ // Ignore unnamed bit-fields.
+ if (!(*I)->getDeclName())
+ continue;
+
+ Res.push_back(*I);
+ }
+
+  // Also save synthesized ivars.
+  // FIXME: Why can't we just use the passed-in Res small vector?
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CGM.getContext().CollectSynthesizedIvars(OID, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ Res.push_back(Ivars[k]);
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
+ const ObjCImplementationDecl *ID) {
+
+ std::vector<llvm::Constant*> Ivars, Ivar(5);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
+
+ // FIXME. Consolidate this with similar code in GenerateClass.
+
+ // Collect declared and synthesized ivars in a small vector.
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ GetNamedIvarList(OID, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD));
+ Ivar[1] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[2] = GetMethodVarType(IVD);
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(IVD->getType());
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
+ unsigned Align = CGM.getContext().getPreferredTypeAlign(
+ IVD->getType().getTypePtr()) >> 3;
+ Align = llvm::Log2_32(Align);
+ Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+    // NOTE: The size of a bitfield does not match gcc's, because of the
+    // way bitfields are treated specially in each. But I am told that
+    // 'size' for bitfield ivars is ignored by the runtime, so it does
+    // not matter. If it ever matters, there is enough info here to get
+    // the bitfield right!
+ Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ }
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ std::vector<llvm::Constant*> Values(3);
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
+ Ivars.size());
+ Values[2] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+ const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Prefix + OID->getNameAsString(),
+ &CGM.getModule());
+ GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ GV->setSection("__DATA, __objc_const");
+
+ UsedGlobals.push_back(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.IvarListnfABIPtrTy);
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ProtocolnfABITy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getNameAsString(),
+ &CGM.getModule());
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ UsedGlobals.push_back(Entry);
+ }
+
+ return Entry;
+}
+
+/// GetOrEmitProtocol - Generate the protocol meta-data:
+/// @code
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char * const protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t * const instance_methods;
+/// const struct method_list_t * const class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// }
+/// @endcode
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ const char *ProtocolName = PD->getNameAsCString();
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(CGM.getContext()),
+ e = PD->instmeth_end(CGM.getContext());
+ i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ } else {
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(CGM.getContext()),
+ e = PD->classmeth_end(CGM.getContext());
+ i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ } else {
+ ClassMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(10);
+ // isa is NULL
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] = EmitProtocolList(
+ "\01l_OBJC_$_PROTOCOL_REFS_" + PD->getNameAsString(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+
+ Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getNameAsString(),
+ "__DATA, __objc_const",
+ InstanceMethods);
+ Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
+ + PD->getNameAsString(),
+ "__DATA, __objc_const",
+ ClassMethods);
+ Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getNameAsString(),
+ "__DATA, __objc_const",
+ OptInstanceMethods);
+ Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getNameAsString(),
+ "__DATA, __objc_const",
+ OptClassMethods);
+ Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getNameAsString(),
+ 0, PD, ObjCTypes);
+ uint32_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ProtocolnfABITy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ std::string("\01l_OBJC_PROTOCOL_$_")+ProtocolName,
+ &CGM.getModule());
+ Entry->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ProtocolnfABITy));
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ // Use this protocol meta-data to build protocol list table in section
+ // __DATA, __objc_protolist
+ llvm::GlobalVariable *PTGV = new llvm::GlobalVariable(
+ ObjCTypes.ProtocolnfABIPtrTy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Entry,
+ std::string("\01l_OBJC_LABEL_PROTOCOL_$_")
+ +ProtocolName,
+ &CGM.getModule());
+ PTGV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ UsedGlobals.push_back(PTGV);
+ return Entry;
+}
+
+/// EmitProtocolList - Generate protocol list meta-data:
+/// @code
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+///   struct _protocol_t *[protocol_count];
+/// }
+/// @endcode
+///
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitProtocolList(const std::string &Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<llvm::Constant*> ProtocolRefs;
+
+ // Just return null for empty protocol lists
+ if (begin == end)
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+
+ // FIXME: We shouldn't need to do this lookup here, should we?
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+ if (GV)
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(
+ ObjCTypes.ProtocolnfABIPtrTy));
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[1] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+ GV = new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name,
+ &CGM.getModule());
+ GV->setSection("__DATA, __objc_const");
+ GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ UsedGlobals.push_back(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+}
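+
+// E.g. a list of the two protocols P1 and P2 is emitted as
+//   { long 2, [3 x %struct._protocol_t*] [@P1, @P2, null] }:
+// the stored count excludes the trailing null terminator pushed above.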
+
+/// GetMethodDescriptionConstant - This routine builds the following
+/// meta-data:
+/// struct _objc_method {
+/// SEL _cmd;
+/// char *method_type;
+/// char *_imp;
+/// }
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ std::vector<llvm::Constant*> Desc(3);
+ Desc[0] = llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ // Protocol methods have no implementation. So, this entry is always NULL.
+ Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
+}
+
+/// EmitObjCValueForIvar - Code gen for a non-fragile ivar reference.
+/// This amounts to generating code for:
+/// @code
+/// (type *)((char *)base + _OBJC_IVAR_$_.ivar);
+/// @endcode
+///
+LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID = ObjectTy->getAsObjCInterfaceType()->getDecl();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
+ CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),
+ false, "ivar");
+}
+
+CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs) {
+  // FIXME: Even though IsSuper is passed, this function does not handle
+  // calls to 'super' receivers.
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::Value *Arg0 = Receiver;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+
+ // Find the message function name.
+  // FIXME: This is too much work just to get the ABI-specific result type
+  // needed to find the message function name.
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType,
+ llvm::SmallVector<QualType, 16>());
+ llvm::Constant *Fn = 0;
+ std::string Name("\01l_");
+ if (CGM.ReturnTypeUsesSret(FnInfo)) {
+#if 0
+    // Unlike what is documented, gcc never generates this API.
+ if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
+ Fn = ObjCTypes.getMessageSendIdStretFixupFn();
+      // FIXME: Is there a better way of getting these names? They are
+      // available in the RuntimeFunctions vector pair.
+ Name += "objc_msgSendId_stret_fixup";
+ }
+ else
+#endif
+    if (IsSuper) {
+      Fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+      Name += "objc_msgSendSuper2_stret_fixup";
+    } else {
+      Fn = ObjCTypes.getMessageSendStretFixupFn();
+      Name += "objc_msgSend_stret_fixup";
+    }
+  } else if (!IsSuper && ResultType->isFloatingType()) {
+ if (const BuiltinType *BT = ResultType->getAsBuiltinType()) {
+ BuiltinType::Kind k = BT->getKind();
+ if (k == BuiltinType::LongDouble) {
+ Fn = ObjCTypes.getMessageSendFpretFixupFn();
+ Name += "objc_msgSend_fpret_fixup";
+      } else {
+ Fn = ObjCTypes.getMessageSendFixupFn();
+ Name += "objc_msgSend_fixup";
+ }
+ }
+  } else {
+#if 0
+    // Unlike what is documented, gcc never generates this API.
+ if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
+ Fn = ObjCTypes.getMessageSendIdFixupFn();
+ Name += "objc_msgSendId_fixup";
+ }
+ else
+#endif
+    if (IsSuper) {
+      Fn = ObjCTypes.getMessageSendSuper2FixupFn();
+      Name += "objc_msgSendSuper2_fixup";
+    } else {
+      Fn = ObjCTypes.getMessageSendFixupFn();
+      Name += "objc_msgSend_fixup";
+    }
+ }
+ assert(Fn && "CGObjCNonFragileABIMac::EmitMessageSend");
+ Name += '_';
+ std::string SelName(Sel.getAsString());
+  // Replace all ':' in the selector name with '_'.
+  for (unsigned i = 0; i < SelName.size(); i++)
+    if (SelName[i] == ':')
+      SelName[i] = '_';
+ Name += SelName;
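+
+  // E.g. the selector setObject:forKey: through the non-super fixup path
+  // yields the message-ref symbol
+  // "\01l_objc_msgSend_fixup_setObject_forKey_".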
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (!GV) {
+ // Build message ref table entry.
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = Fn;
+ Values[1] = GetMethodVarName(Sel);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+ GV = new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ Name,
+ &CGM.getModule());
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setAlignment(16);
+ GV->setSection("__DATA, __objc_msgrefs, coalesced");
+ }
+ llvm::Value *Arg1 = CGF.Builder.CreateBitCast(GV, ObjCTypes.MessageRefPtrTy);
+
+ CallArgList ActualArgs;
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg1),
+ ObjCTypes.MessageRefCPtrTy));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs);
+ llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0);
+ Callee = CGF.Builder.CreateLoad(Callee);
+ const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true);
+ Callee = CGF.Builder.CreateBitCast(Callee,
+ llvm::PointerType::getUnqual(FTy));
+ return CGF.EmitCall(FnInfo1, Callee, ActualArgs);
+}
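+
+// Rough shape of the dispatch emitted above (IR sketch, names
+// illustrative):
+//   %msgref = @"\01l_objc_msgSend_fixup_foo_"  ; the { IMP, SEL } slot
+//   %fn = load of the first field of %msgref
+//   call %fn(%receiver, %struct._message_ref_t* %msgref, <args>)
+// The message-ref slot is passed where a bare selector would go, so the
+// runtime can rewrite the stored IMP in place after the first send.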
+
+/// Generate code for a message send expression in the nonfragile abi.
+CodeGen::RValue CGObjCNonFragileABIMac::GenerateMessageSend(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ return LegacyDispatchedSelector(Sel)
+ ? EmitLegacyMessageSend(CGF, ResultType, EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, ObjCTypes)
+ : EmitMessageSend(CGF, ResultType, Sel,
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs);
+}
+
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+
+ if (!GV) {
+ GV = new llvm::GlobalVariable(ObjCTypes.ClassnfABITy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, Name, &CGM.getModule());
+ }
+
+ return GV;
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_",
+ &CGM.getModule());
+ Entry->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
+ UsedGlobals.push_back(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
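+
+// E.g. a reference to class Foo loads an internal
+// \01L_OBJC_CLASSLIST_REFERENCES_$_ slot whose initializer is
+// OBJC_CLASS_$_Foo; the runtime rebinds such __objc_classrefs slots at
+// image load time.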
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_",
+ &CGM.getModule());
+ Entry->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ UsedGlobals.push_back(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
+/// EmitMetaClassRef - Return a Value* holding the address of the
+/// _class_t meta-data for the given interface's metaclass.
+///
+llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
+ if (Entry)
+ return Builder.CreateLoad(Entry, false, "tmp");
+
+ std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ MetaClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_",
+ &CGM.getModule());
+ Entry->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ UsedGlobals.push_back(Entry);
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(Builder, ID);
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs) {
+ // ...
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+      // Message sent to 'super' in a class method defined in
+      // a category implementation.
+ Target = EmitClassRef(CGF.Builder, Class);
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ }
+ else
+ Target = EmitMetaClassRef(CGF.Builder, Class);
+ }
+ else
+ Target = EmitSuperClassRef(CGF.Builder, Class);
+
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ const llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+
+ return (LegacyDispatchedSelector(Sel))
+ ? EmitLegacyMessageSend(CGF, ResultType,EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs,
+ ObjCTypes)
+ : EmitMessageSend(CGF, ResultType, Sel,
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
+ Selector Sel) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.SelectorPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Casted, "\01L_OBJC_SELECTOR_REFERENCES_",
+ &CGM.getModule());
+ Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ UsedGlobals.push_back(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
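+
+// E.g. @selector(length) compiles to a load from an internal
+// \01L_OBJC_SELECTOR_REFERENCES_ slot in __objc_selrefs; the runtime
+// uniques these selector references at image load time.
+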
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, "assignivar");
+ return;
+}
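+
+// E.g. under -fobjc-gc, a __strong ivar store "obj->ivar = val;" is
+// lowered to a call objc_assign_ivar(val, &obj->ivar) instead of a plain
+// store.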
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+                          src, dst, "strongassign");
+ return;
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj)
+{
+ const llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ return;
+}
+
+void
+CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can safely avoid a temp here because jumps into
+  // @synchronized are illegal, and this will dominate its uses.
+ llvm::Value *SyncArg = 0;
+ if (!isTry) {
+ SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ }
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ CGF.setInvokeDest(TryHandler);
+
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the exception handler.
+
+ CGF.EmitBlock(TryHandler);
+
+ llvm::Value *llvm_eh_exception =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector_i64 =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i64);
+ llvm::Value *llvm_eh_typeid_for_i64 =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i64);
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+ SelectorArgs.push_back(Exc);
+ SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr());
+
+ // Construct the lists of (type, catch body) to handle.
+ llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers;
+ bool HasCatchAll = false;
+ if (isTry) {
+ if (const ObjCAtCatchStmt* CatchStmt =
+ cast<ObjCAtTryStmt>(S).getCatchStmts()) {
+ for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+ const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+ Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
+
+ // catch(...) always matches.
+ if (!CatchDecl) {
+ // Use i8* null here to signal this is a catch all, not a cleanup.
+ llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ SelectorArgs.push_back(Null);
+ HasCatchAll = true;
+ break;
+ }
+
+ if (CGF.getContext().isObjCIdType(CatchDecl->getType()) ||
+ CatchDecl->getType()->isObjCQualifiedIdType()) {
+ llvm::Value *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "OBJC_EHTYPE_id", &CGM.getModule());
+ SelectorArgs.push_back(IDEHType);
+ HasCatchAll = true;
+ break;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const PointerType *PT = CatchDecl->getType()->getAsPointerType();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT =
+ PT->getPointeeType()->getAsObjCInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
+ SelectorArgs.push_back(EHType);
+ }
+ }
+ }
+
+ // We use a cleanup unless there was already a catch all.
+ if (!HasCatchAll) {
+ SelectorArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ }
+
+ llvm::Value *Selector =
+ CGF.Builder.CreateCall(llvm_eh_selector_i64,
+ SelectorArgs.begin(), SelectorArgs.end(),
+ "selector");
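+
+  // Illustrative sketch (not part of the original source): for
+  // "@try { ... } @catch (Foo *f) { ... }" the call above produces roughly
+  //   %sel = call i64 @llvm.eh.selector.i64(%exc, <personality>,
+  //                                         @"OBJC_EHTYPE_$_Foo", i32 0)
+  // where the trailing i32 0 is the cleanup marker pushed when no catch-all
+  // handler is present.
+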
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+ const ParmVarDecl *CatchParam = Handlers[i].first;
+ const Stmt *CatchBody = Handlers[i].second;
+
+ llvm::BasicBlock *Next = 0;
+
+ // The last handler always matches.
+ if (i + 1 != e) {
+ assert(CatchParam && "Only last handler can be a catch all.");
+
+ llvm::BasicBlock *Match = CGF.createBasicBlock("match");
+ Next = CGF.createBasicBlock("catch.next");
+ llvm::Value *Id =
+ CGF.Builder.CreateCall(llvm_eh_typeid_for_i64,
+ CGF.Builder.CreateBitCast(SelectorArgs[i+2],
+ ObjCTypes.Int8PtrTy));
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(Selector, Id),
+ Match, Next);
+
+ CGF.EmitBlock(Match);
+ }
+
+ if (CatchBody) {
+ llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end");
+ llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
+
+ // Cleanups must call objc_end_catch.
+ //
+ // FIXME: It seems incorrect for objc_begin_catch to be inside this
+ // context, but this matches gcc.
+ CGF.PushCleanupBlock(MatchEnd);
+ CGF.setInvokeDest(MatchHandler);
+
+ llvm::Value *ExcObject =
+ CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc);
+
+ // Bind the catch parameter if it exists.
+ if (CatchParam) {
+ ExcObject =
+ CGF.Builder.CreateBitCast(ExcObject,
+ CGF.ConvertType(CatchParam->getType()));
+ // CatchParam is a ParmVarDecl because of the grammar
+ // construction used to handle this, but for codegen purposes
+ // we treat this as a local decl.
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.ObjCEHValueStack.push_back(ExcObject);
+ CGF.EmitStmt(CatchBody);
+ CGF.ObjCEHValueStack.pop_back();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(MatchHandler);
+
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ llvm::SmallVector<llvm::Value*, 8> Args;
+ Args.push_back(Exc);
+ Args.push_back(ObjCTypes.getEHPersonalityPtr());
+ Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 0));
+ CGF.Builder.CreateCall(llvm_eh_selector_i64, Args.begin(), Args.end());
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.EmitBlock(MatchEnd);
+
+ // Unfortunately, we also have to generate another EH frame here
+ // in case this throws.
+ llvm::BasicBlock *MatchEndHandler =
+ CGF.createBasicBlock("match.end.handler");
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(),
+ Cont, MatchEndHandler,
+ Args.begin(), Args.begin());
+
+ CGF.EmitBlock(Cont);
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ CGF.EmitBlock(MatchEndHandler);
+ Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ Args.clear();
+ Args.push_back(Exc);
+ Args.push_back(ObjCTypes.getEHPersonalityPtr());
+ Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 0));
+ CGF.Builder.CreateCall(llvm_eh_selector_i64, Args.begin(), Args.end());
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ if (Next)
+ CGF.EmitBlock(Next);
+ } else {
+      assert(!Next && "catch-all should be the last handler.");
+
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+
+ // Pop the cleanup entry, the @finally is outside this cleanup
+ // scope.
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+ CGF.setInvokeDest(PrevLandingPad);
+
+ CGF.EmitBlock(FinallyBlock);
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit 'objc_sync_exit(expr)' as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ }
+
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ // Branch around the rethrow code.
+ CGF.EmitBranch(FinallyEnd);
+
+ CGF.EmitBlock(FinallyRethrow);
+ CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(FinallyEnd);
+}
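+
+// Rough control-flow sketch of what EmitTryOrSynchronizedStmt emits above
+// (an illustrative note, not part of the original source):
+//
+//   try body ------------------------------------> finally -> finally.end
+//     | (unwind)
+//   try.handler -> type match -> @catch body ----> finally -> finally.end
+//     | (no match, or the @catch itself unwinds)
+//     +-> store exception, run cleanups ---------> finally -> finally.throw
+//                                                  (rethrow via the unwind
+//                                                   runtime, then unreachable)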
+
+/// EmitThrowStmt - Generate code for a throw statement.
+void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *Exception;
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ Exception = CGF.EmitScalarExpr(ThrowExpr);
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ Exception = CGF.ObjCEHValueStack.back();
+ }
+
+ llvm::Value *ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
+ if (InvokeDest) {
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(ObjCTypes.getExceptionThrowFn(),
+ Cont, InvokeDest,
+ &ExceptionAsObject, &ExceptionAsObject + 1);
+ CGF.EmitBlock(Cont);
+ } else
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
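+
+// For "@throw e;" the method above emits, roughly (an illustrative sketch,
+// not part of the original source):
+//   %obj = bitcast <e> to i8*                  ; ObjectPtrTy
+//   call void @objc_exception_throw(i8* %obj)  ; invoke form inside a @try
+//   unreachable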
+
+llvm::Value *
+CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition) {
+ llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
+
+  // If we don't need a definition, return the entry if found, or check
+  // whether we can use an external reference.
+ if (!ForDefinition) {
+ if (Entry)
+ return Entry;
+
+ // If this type (or a super class) has the __objc_exception__
+ // attribute, emit an external reference.
+ if (hasObjCExceptionAttribute(ID))
+ return Entry =
+ new llvm::GlobalVariable(ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ (std::string("OBJC_EHTYPE_$_") +
+ ID->getIdentifier()->getName()),
+ &CGM.getModule());
+ }
+
+ // Otherwise we need to either make a new entry or fill in the
+ // initializer.
+ assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition");
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string VTableName = "objc_ehtype_vtable";
+ llvm::GlobalVariable *VTableGV =
+ CGM.getModule().getGlobalVariable(VTableName);
+ if (!VTableGV)
+ VTableGV = new llvm::GlobalVariable(ObjCTypes.Int8PtrTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, VTableName, &CGM.getModule());
+
+ llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 2);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
+ Values[1] = GetClassName(ID->getIdentifier());
+ Values[2] = GetClassGlobal(ClassName);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
+
+ if (Entry) {
+ Entry->setInitializer(Init);
+ } else {
+ Entry = new llvm::GlobalVariable(ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ (std::string("OBJC_EHTYPE_$_") +
+ ID->getIdentifier()->getName()),
+ &CGM.getModule());
+ }
+
+ if (CGM.getLangOptions().getVisibilityMode() == LangOptions::Hidden)
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Entry->setAlignment(8);
+
+ if (ForDefinition) {
+ Entry->setSection("__DATA,__objc_const");
+ Entry->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/* *** */
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCMac(CGM);
+}
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacNonFragileABIObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCNonFragileABIMac(CGM);
+}
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
new file mode 100644
index 0000000..b8cf026
--- /dev/null
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -0,0 +1,206 @@
+//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for Objective-C code generation. Concrete
+// subclasses of this implement code generation for specific Objective-C
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OBJCRUNTIME_H
+#define CLANG_CODEGEN_OBJCRUNTIME_H
+#include "clang/Basic/IdentifierTable.h" // Selector
+#include "llvm/ADT/SmallVector.h"
+#include "clang/AST/DeclObjC.h"
+#include <string>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Constant;
+ class Function;
+ class Module;
+ class StructLayout;
+ class StructType;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenFunction;
+}
+
+ class FieldDecl;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCContainerDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCInterfaceDecl;
+ class ObjCMessageExpr;
+ class ObjCMethodDecl;
+ class ObjCProtocolDecl;
+ class Selector;
+ class ObjCIvarDecl;
+ class ObjCStringLiteral;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+// FIXME: Several methods should be pure virtual but aren't, to avoid
+// breaking partially-implemented subclasses.
+
+/// Implements runtime-specific code generation functions.
+class CGObjCRuntime {
+public:
+ // Utility functions for unified ivar access. These need to
+ // eventually be folded into other places (the structure layout
+ // code).
+
+protected:
+ /// Compute an offset to the given ivar, suitable for passing to
+ /// EmitValueForIvarAtOffset. Note that the correct handling of
+  /// bit-fields is carefully coordinated by these two functions; use caution!
+  ///
+  /// The latter overload is suitable for computing the offset of a
+  /// synthesized ivar.
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar);
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar);
+
+ LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset);
+
+public:
+ virtual ~CGObjCRuntime();
+
+ /// Generate the function required to register all Objective-C components in
+ /// this compilation unit with the runtime library.
+ virtual llvm::Function *ModuleInitFunction() = 0;
+
+ /// Get a selector for the specified name and type values. The
+ /// return value should have the LLVM type for pointer-to
+ /// ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ Selector Sel) = 0;
+
+ /// Get a typed selector.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method) = 0;
+
+ /// Generate a constant string object.
+ virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *) = 0;
+
+ /// Generate a category. A category contains a list of methods (and
+ /// accompanying metadata) and a list of protocols.
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
+
+  /// Generate a class structure for this class.
+ virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+
+ /// Generate an Objective-C message send operation.
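+  ///
+  /// For example (illustrative, not from the original source): the
+  /// expression "[receiver foo:x]" arrives here with Sel = @selector(foo:),
+  /// Receiver = the evaluated receiver value, and CallArgs holding the
+  /// single argument x.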
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method=0) = 0;
+
+ /// Generate an Objective-C message send operation to the super
+ /// class initiated in a method for Class and with the given Self
+ /// object.
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Self,
+ bool IsClassMessage,
+ const CallArgList &CallArgs) = 0;
+
+ /// Emit the code to return the named protocol as an object, as in a
+ /// @protocol expression.
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate the named protocol. Protocols contain method metadata but no
+ /// implementations.
+ virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate a function preamble for a method with the specified
+ /// types.
+
+  // FIXME: Currently this just generates the Function definition, but really this
+ // should also be generating the loads of the parameters, as the runtime
+ // should have full control over how parameters are passed.
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) = 0;
+
+ /// Return the runtime function for getting properties.
+ virtual llvm::Constant *GetPropertyGetFunction() = 0;
+
+ /// Return the runtime function for setting properties.
+ virtual llvm::Constant *GetPropertySetFunction() = 0;
+
+ /// GetClass - Return a reference to the class for the given
+ /// interface decl.
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) = 0;
+
+ /// EnumerationMutationFunction - Return the function that's called by the
+ /// compiler when a mutation is detected during foreach iteration.
+ virtual llvm::Constant *EnumerationMutationFunction() = 0;
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) = 0;
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) = 0;
+ virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) = 0;
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) = 0;
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) = 0;
+};
+
+/// Creates an instance of an Objective-C runtime class.
+//TODO: This should include some way of selecting which runtime to target.
+CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacNonFragileABIObjCRuntime(CodeGenModule &CGM);
+}
+}
+#endif
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
new file mode 100644
index 0000000..b67996c
--- /dev/null
+++ b/lib/CodeGen/CGStmt.cpp
@@ -0,0 +1,1022 @@
+//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Statement Emission
+//===----------------------------------------------------------------------===//
+
+void CodeGenFunction::EmitStopPoint(const Stmt *S) {
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(S->getLocStart());
+ DI->EmitStopPoint(CurFn, Builder);
+ }
+}
+
+void CodeGenFunction::EmitStmt(const Stmt *S) {
+ assert(S && "Null statement?");
+
+ // Check if we can handle this without bothering to generate an
+ // insert point or debug info.
+ if (EmitSimpleStmt(S))
+ return;
+
+ // If we happen to be at an unreachable point just create a dummy
+ // basic block to hold the code. We could change parts of irgen to
+ // simply not generate this code, but this situation is rare and
+ // probably not worth the effort.
+ // FIXME: Verify previous performance/effort claim.
+ EnsureInsertPoint();
+
+ // Generate a stoppoint if we are emitting debug info.
+ EmitStopPoint(S);
+
+ switch (S->getStmtClass()) {
+ default:
+ // Must be an expression in a stmt context. Emit the value (to get
+ // side-effects) and ignore the result.
+ if (const Expr *E = dyn_cast<Expr>(S)) {
+ EmitAnyExpr(E, 0, false, true);
+ } else {
+ ErrorUnsupported(S, "statement");
+ }
+ break;
+ case Stmt::IndirectGotoStmtClass:
+ EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
+
+ case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
+ case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
+ case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
+ case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
+
+ case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
+ case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
+
+ case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
+ case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+
+ case Stmt::ObjCAtTryStmtClass:
+ EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
+ break;
+ case Stmt::ObjCAtCatchStmtClass:
+ assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
+ break;
+ case Stmt::ObjCAtFinallyStmtClass:
+ assert(0 && "@finally statements should be handled by EmitObjCAtTryStmt");
+ break;
+ case Stmt::ObjCAtThrowStmtClass:
+ EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
+ break;
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
+ break;
+ }
+}
+
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default: return false;
+ case Stmt::NullStmtClass: break;
+ case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
+ case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
+ case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
+ case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
+ case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
+ case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
+ case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
+ }
+
+ return true;
+}
+
+/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
+/// this captures the expression result of the last sub-statement and returns it
+/// (for use by the statement expression extension).
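+///
+/// For example (illustrative), in the GNU statement-expression extension
+///   int x = ({ int tmp = f(); tmp + 1; });
+/// the value of the final sub-statement 'tmp + 1' becomes the result.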
+RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ llvm::Value *AggLoc, bool isAggVol) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
+ "LLVM IR generation of compound statement ('{}')");
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ EnsureInsertPoint();
+ DI->setLocation(S.getLBracLoc());
+ // FIXME: The llvm backend is currently not ready to deal with region_end
+ // for block scoping. In the presence of always_inline functions it gets so
+ // confused that it doesn't emit any debug info. Just disable this for now.
+ //DI->EmitRegionStart(CurFn, Builder);
+ }
+
+ // Keep track of the current cleanup stack depth.
+ size_t CleanupStackDepth = CleanupEntries.size();
+ bool OldDidCallStackSave = DidCallStackSave;
+ DidCallStackSave = false;
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end()-GetLast; I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI) {
+ EnsureInsertPoint();
+ DI->setLocation(S.getRBracLoc());
+
+ // FIXME: The llvm backend is currently not ready to deal with region_end
+ // for block scoping. In the presence of always_inline functions it gets so
+ // confused that it doesn't emit any debug info. Just disable this for now.
+ //DI->EmitRegionEnd(CurFn, Builder);
+ }
+
+ RValue RV;
+ if (!GetLast)
+ RV = RValue::get(0);
+ else {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ const Stmt *LastStmt = S.body_back();
+ while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
+ EmitLabel(*LS);
+ LastStmt = LS->getSubStmt();
+ }
+
+ EnsureInsertPoint();
+
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
+ }
+
+ DidCallStackSave = OldDidCallStackSave;
+
+ EmitCleanupBlocks(CleanupStackDepth);
+
+ return RV;
+}
+
+void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
+ llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
+
+  // If there is a cleanup stack, then it isn't worth trying to
+ // simplify this block (we would need to remove it from the scope map
+ // and cleanup entry).
+ if (!CleanupEntries.empty())
+ return;
+
+ // Can only simplify direct branches.
+ if (!BI || !BI->isUnconditional())
+ return;
+
+ BB->replaceAllUsesWith(BI->getSuccessor(0));
+ BI->eraseFromParent();
+ BB->eraseFromParent();
+}
+
+void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+ // Fall out of the current block (if necessary).
+ EmitBranch(BB);
+
+ if (IsFinished && BB->use_empty()) {
+ delete BB;
+ return;
+ }
+
+ // If necessary, associate the block with the cleanup stack size.
+ if (!CleanupEntries.empty()) {
+ // Check if the basic block has already been inserted.
+ BlockScopeMap::iterator I = BlockScopes.find(BB);
+ if (I != BlockScopes.end()) {
+ assert(I->second == CleanupEntries.size() - 1);
+ } else {
+ BlockScopes[BB] = CleanupEntries.size() - 1;
+ CleanupEntries.back().Blocks.push_back(BB);
+ }
+ }
+
+ CurFn->getBasicBlockList().push_back(BB);
+ Builder.SetInsertPoint(BB);
+}
+
+void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
+ // Emit a branch from the current block to the target one if this
+ // was a real block. If this was just a fall-through block after a
+ // terminator, don't emit it.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (!CurBB || CurBB->getTerminator()) {
+ // If there is no insert point or the previous block is already
+ // terminated, don't touch it.
+ } else {
+ // Otherwise, create a fall-through branch.
+ Builder.CreateBr(Target);
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitLabel(const LabelStmt &S) {
+ EmitBlock(getBasicBlockForLabel(&S));
+}
+
+
+void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
+ EmitLabel(S);
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel()));
+}
+
+void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ // Emit initial switch which will be patched up later by
+ // EmitIndirectSwitches(). We need a default dest, so we use the
+ // current BB, but this is overwritten.
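+  //
+  // (Illustrative note) This lowers the GNU computed-goto extension, e.g.
+  // "goto *addrs[i];": the target address is switched on here, and
+  // EmitIndirectSwitches() later fills in the real label destinations.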
+ llvm::Value *V = Builder.CreatePtrToInt(EmitScalarExpr(S.getTarget()),
+ llvm::Type::Int32Ty,
+ "addr");
+ llvm::SwitchInst *I = Builder.CreateSwitch(V, Builder.GetInsertBlock());
+ IndirectSwitches.push_back(I);
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+ // C99 6.8.4.1: The first substatement is executed if the expression compares
+ // unequal to 0. The condition must be a scalar type.
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
+ // Figure out which block (then or else) is executed.
+ const Stmt *Executed = S.getThen(), *Skipped = S.getElse();
+ if (Cond == -1) // Condition false?
+ std::swap(Executed, Skipped);
+
+ // If the skipped block has no labels in it, just emit the executed block.
+ // This avoids emitting dead code and simplifies the CFG substantially.
+ if (!ContainsLabel(Skipped)) {
+ if (Executed)
+ EmitStmt(Executed);
+ return;
+ }
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
+ // the conditional branch.
+ llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
+ llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
+ llvm::BasicBlock *ElseBlock = ContBlock;
+ if (S.getElse())
+ ElseBlock = createBasicBlock("if.else");
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
+
+ // Emit the 'then' code.
+ EmitBlock(ThenBlock);
+ EmitStmt(S.getThen());
+ EmitBranch(ContBlock);
+
+ // Emit the 'else' code if present.
+ if (const Stmt *Else = S.getElse()) {
+ EmitBlock(ElseBlock);
+ EmitStmt(Else);
+ EmitBranch(ContBlock);
+ }
+
+ // Emit the continuation block for code after the if.
+ EmitBlock(ContBlock, true);
+}
+
+void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
+ // Emit the header for the loop, insert it, which will create an uncond br to
+ // it.
+ llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond");
+ EmitBlock(LoopHeader);
+
+ // Create an exit block for when the condition fails, create a block for the
+ // body of the loop.
+ llvm::BasicBlock *ExitBlock = createBasicBlock("while.end");
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
+
+ // Evaluate the conditional in the while header. C99 6.8.5.1: The
+ // evaluation of the controlling expression takes place before each
+ // execution of the loop body.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // while(1) is common, avoid extra exit blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isOne())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, go to the loop body.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+
+ // Emit the loop body.
+ EmitBlock(LoopBody);
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Cycle to the condition.
+ EmitBranch(LoopHeader);
+
+ // Emit the exit block.
+ EmitBlock(ExitBlock, true);
+
+ // The LoopHeader typically is just a branch if we skipped emitting
+ // a branch, try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader);
+}
+
+void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
+ // Emit the body for the loop, insert it, which will create an uncond br to
+ // it.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ llvm::BasicBlock *AfterDo = createBasicBlock("do.end");
+ EmitBlock(LoopBody);
+
+ llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
+
+ // Emit the body of the loop into the block.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(DoCond);
+
+ // C99 6.8.5.2: "The evaluation of the controlling expression takes place
+ // after each execution of the loop body."
+
+ // Evaluate the conditional in the while header.
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isZero())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, iterate the loop.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
+
+ // Emit the exit block.
+ EmitBlock(AfterDo);
+
+ // The DoCond block typically is just a branch if we skipped
+ // emitting a branch, try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(DoCond);
+}
+
+void CodeGenFunction::EmitForStmt(const ForStmt &S) {
+  // FIXME: What do we do if the increment (for example) contains a stmt
+  // expression which contains a continue/break?
+
+ // Evaluate the first part before the loop.
+ if (S.getInit())
+ EmitStmt(S.getInit());
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ // Evaluate the condition if present. If not, treat it as a
+  // non-zero constant according to C99 6.8.5.3p2, i.e., true.
+ if (S.getCond()) {
+ // As long as the condition is true, iterate the loop.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ EmitBranchOnBoolExpr(S.getCond(), ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+ } else {
+ // Treat it as a non-zero constant. Don't even create a new block for the
+ // body, just fall into it.
+ }
+
+ // If the for loop doesn't have an increment we can just use the
+ // condition as the continue block.
+ llvm::BasicBlock *ContinueBlock;
+ if (S.getInc())
+ ContinueBlock = createBasicBlock("for.inc");
+ else
+ ContinueBlock = CondBlock;
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
+
+ // If the condition is true, execute the body of the for stmt.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // If there is an increment, emit it next.
+ if (S.getInc()) {
+ EmitBlock(ContinueBlock);
+ EmitStmt(S.getInc());
+ }
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
+
+void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
+ if (RV.isScalar()) {
+ Builder.CreateStore(RV.getScalarVal(), ReturnValue);
+ } else if (RV.isAggregate()) {
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+ } else {
+ StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
+ }
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
+/// if the function returns void, or may be missing one if the function returns
+/// non-void. Fun stuff :).
+void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+  // Emit the result value, even if unused, to evaluate the side effects.
+ const Expr *RV = S.getRetValue();
+
+ // FIXME: Clean this up by using an LValue for ReturnTemp,
+ // EmitStoreThroughLValue, and EmitAnyExpr.
+ if (!ReturnValue) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (RV)
+ EmitAnyExpr(RV);
+ } else if (RV == 0) {
+ // Do nothing (return value is left uninitialized)
+ } else if (FnRetTy->isReferenceType()) {
+ // If this function returns a reference, take the address of the expression
+ // rather than the value.
+ Builder.CreateStore(EmitLValue(RV).getAddress(), ReturnValue);
+ } else if (!hasAggregateLLVMType(RV->getType())) {
+ Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+ } else if (RV->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(RV, ReturnValue, false);
+ } else {
+ EmitAggExpr(RV, ReturnValue, false);
+ }
+
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
+ for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
+ I != E; ++I)
+ EmitDecl(**I);
+}
+
+void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
+ assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+/// EmitCaseStmtRange - If the range of a case statement is not too big, add
+/// one case to the switch instruction for each value within the range. If
+/// the range is too big, emit an "if" condition check instead.
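+///
+/// For example (illustrative), with the GNU case-range extension:
+///   case 1 ... 3:    // small range: added as switch cases 1, 2 and 3
+///   case 1 ... 1000: // large range: emitted as an unsigned range check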
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+ assert(S.getRHS() && "Expected RHS value in CaseStmt");
+
+ llvm::APSInt LHS = S.getLHS()->EvaluateAsInt(getContext());
+ llvm::APSInt RHS = S.getRHS()->EvaluateAsInt(getContext());
+
+ // Emit the code for this case. We do this first to make sure it is
+ // properly chained from our predecessor before generating the
+ // switch machinery to enter this block.
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ EmitStmt(S.getSubStmt());
+
+ // If range is empty, do nothing.
+ if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
+ return;
+
+ llvm::APInt Range = RHS - LHS;
+ // FIXME: parameters such as this should not be hardcoded.
+ if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
+ // Range is small enough to add multiple switch instruction cases.
+ for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
+ SwitchInsn->addCase(llvm::ConstantInt::get(LHS), CaseDest);
+ LHS++;
+ }
+ return;
+ }
+
+ // The range is too big. Emit "if" condition into a new block,
+ // making sure to save and restore the current insertion point.
+ llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
+
+ // Push this test onto the chain of range checks (which terminates
+ // in the default basic block). The switch's default will be changed
+ // to the top of this chain after switch emission is complete.
+ llvm::BasicBlock *FalseDest = CaseRangeBlock;
+ CaseRangeBlock = createBasicBlock("sw.caserange");
+
+ CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+ Builder.SetInsertPoint(CaseRangeBlock);
+
+ // Emit range check.
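+  //
+  // (Illustrative note) A single unsigned comparison covers both bounds:
+  // (x - LHS) ule (RHS - LHS) holds exactly when LHS <= x <= RHS, since any
+  // x below LHS wraps around to a large unsigned value.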
+ llvm::Value *Diff =
+ Builder.CreateSub(SwitchInsn->getCondition(), llvm::ConstantInt::get(LHS),
+ "tmp");
+ llvm::Value *Cond =
+ Builder.CreateICmpULE(Diff, llvm::ConstantInt::get(Range), "tmp");
+ Builder.CreateCondBr(Cond, CaseDest, FalseDest);
+
+ // Restore the appropriate insertion point.
+ if (RestoreBB)
+ Builder.SetInsertPoint(RestoreBB);
+ else
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ if (S.getRHS()) {
+ EmitCaseStmtRange(S);
+ return;
+ }
+
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
+ SwitchInsn->addCase(llvm::ConstantInt::get(CaseVal), CaseDest);
+
+ // Recursively emitting the statement is acceptable, but is not wonderful for
+ // code where we have many case statements nested together, i.e.:
+ // case 1:
+ // case 2:
+ // case 3: etc.
+ // Handling this recursively will create a new block for each case statement
+ // that falls through to the next case which is IR intensive. It also causes
+ // deep recursion which can run into stack depth limitations. Handle
+ // sequential non-range case statements specially.
+ const CaseStmt *CurCase = &S;
+ const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
+
+  // Otherwise, iteratively add consecutive cases to this switch stmt.
+ while (NextCase && NextCase->getRHS() == 0) {
+ CurCase = NextCase;
+ CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
+ SwitchInsn->addCase(llvm::ConstantInt::get(CaseVal), CaseDest);
+
+ NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
+ }
+
+  // Normal default recursion for non-case statements.
+ EmitStmt(CurCase->getSubStmt());
+}
+
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+ llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
+ assert(DefaultBlock->empty() &&
+ "EmitDefaultStmt: Default block already defined?");
+ EmitBlock(DefaultBlock);
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
+ llvm::Value *CondV = EmitScalarExpr(S.getCond());
+
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
+  // Create a basic block to hold the code that comes after the switch
+  // statement. We also need to create a default block now so that
+  // explicit case-range tests have a place to jump to on failure.
+ llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog");
+ llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
+ SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+ CaseRangeBlock = DefaultBlock;
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+  // All break statements jump to NextBlock. If BreakContinueStack is
+  // non-empty, reuse the last ContinueBlock.
+ llvm::BasicBlock *ContinueBlock = 0;
+ if (!BreakContinueStack.empty())
+ ContinueBlock = BreakContinueStack.back().ContinueBlock;
+
+  // Ensure any VLAs created between there and here are undone.
+ BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock));
+
+ // Emit switch body.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Update the default block in case explicit case range tests have
+ // been chained on top.
+ SwitchInsn->setSuccessor(0, CaseRangeBlock);
+
+ // If a default was never emitted then reroute any jumps to it and
+ // discard.
+ if (!DefaultBlock->getParent()) {
+ DefaultBlock->replaceAllUsesWith(NextBlock);
+ delete DefaultBlock;
+ }
+
+ // Emit continuation.
+ EmitBlock(NextBlock, true);
+
+ SwitchInsn = SavedSwitchInsn;
+ CaseRangeBlock = SavedCRBlock;
+}
+
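+/// SimplifyConstraint - Convert a GCC inline-asm constraint string into LLVM
+/// form. Illustrative examples (not in the original comments): "g" becomes
+/// "imr", and a symbolic operand reference such as "[result]" is rewritten
+/// to the numeric index of the matching named operand.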
+static std::string
+SimplifyConstraint(const char *Constraint, TargetInfo &Target,
+ llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
+ std::string Result;
+
+ while (*Constraint) {
+ switch (*Constraint) {
+ default:
+ Result += Target.convertConstraint(*Constraint);
+ break;
+ // Ignore these
+ case '*':
+ case '?':
+ case '!':
+ break;
+ case 'g':
+ Result += "imr";
+ break;
+ case '[': {
+ assert(OutCons &&
+ "Must pass output names to constraints with a symbolic name");
+ unsigned Index;
+ bool result = Target.resolveSymbolicName(Constraint,
+ &(*OutCons)[0],
+ OutCons->size(), Index);
+ assert(result && "Could not resolve symbolic name"); result=result;
+ Result += llvm::utostr(Index);
+ break;
+ }
+ }
+
+ Constraint++;
+ }
+
+ return Result;
+}
+
+llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
+ llvm::Value *Arg;
+ if (Info.allowsRegister() || !Info.allowsMemory()) {
+ const llvm::Type *Ty = ConvertType(InputExpr->getType());
+
+ if (Ty->isSingleValueType()) {
+ Arg = EmitScalarExpr(InputExpr);
+ } else {
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+
+ uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+ if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+ Ty = llvm::IntegerType::get(Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
+ } else {
+ Arg = Dest.getAddress();
+ ConstraintStr += '*';
+ }
+ }
+ } else {
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+ Arg = Dest.getAddress();
+ ConstraintStr += '*';
+ }
+
+ return Arg;
+}
+
+void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Analyze the asm string to decompose it into its pieces. We know that Sema
+ // has already done this, so it is guaranteed to be successful.
+ llvm::SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
+ unsigned DiagOffs;
+ S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
+
+ // Assemble the pieces into the final asm string.
+ std::string AsmString;
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ if (Pieces[i].isString())
+ AsmString += Pieces[i].getString();
+ else if (Pieces[i].getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ else
+ AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+ Pieces[i].getModifier() + '}';
+ }
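+
+  // For example (illustrative), the source string "mov %1, %0" with two
+  // operands is assembled into the LLVM IR asm string "mov $1, $0".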
+
+ // Get all the output and input constraints together.
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
+ S.getOutputName(i));
+ bool result = Target.validateOutputConstraint(Info);
+ assert(result && "Failed to parse output constraint"); result=result;
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
+ S.getInputName(i));
+ bool result = Target.validateInputConstraint(OutputConstraintInfos.data(),
+ S.getNumOutputs(),
+ Info); result=result;
+ assert(result && "Failed to parse input constraint");
+ InputConstraintInfos.push_back(Info);
+ }
+
+ std::string Constraints;
+
+ std::vector<LValue> ResultRegDests;
+ std::vector<QualType> ResultRegQualTys;
+ std::vector<const llvm::Type *> ResultRegTypes;
+ std::vector<const llvm::Type *> ResultTruncRegTypes;
+ std::vector<const llvm::Type*> ArgTypes;
+ std::vector<llvm::Value*> Args;
+
+ // Keep track of inout constraints.
+ std::string InOutConstraints;
+ std::vector<llvm::Value*> InOutArgs;
+ std::vector<const llvm::Type*> InOutArgTypes;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+ // Simplify the output constraint.
+ std::string OutputConstraint(S.getOutputConstraint(i));
+ OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
+
+ const Expr *OutExpr = S.getOutputExpr(i);
+ OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+
+ LValue Dest = EmitLValue(OutExpr);
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // If this is a register output, then make the inline asm return it
+ // by-value. If this is a memory result, return the value by-reference.
+ if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
+ Constraints += "=" + OutputConstraint;
+ ResultRegQualTys.push_back(OutExpr->getType());
+ ResultRegDests.push_back(Dest);
+ ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+ ResultTruncRegTypes.push_back(ResultRegTypes.back());
+
+ // If this output is tied to an input, and if the input is larger, then
+ // we need to set the actual result type of the inline asm node to be the
+ // same as the input type.
+ if (Info.hasMatchingInput()) {
+ unsigned InputNo;
+ for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
+ TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
+ if (Input.hasTiedOperand() &&
+ Input.getTiedOperand() == i)
+ break;
+ }
+ assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
+
+ QualType InputTy = S.getInputExpr(InputNo)->getType();
+ QualType OutputTy = OutExpr->getType();
+
+ uint64_t InputSize = getContext().getTypeSize(InputTy);
+ if (getContext().getTypeSize(OutputTy) < InputSize) {
+ // Form the asm to return the value as a larger integer type.
+ ResultRegTypes.back() = llvm::IntegerType::get((unsigned)InputSize);
+ }
+ }
+
+ } else {
+ ArgTypes.push_back(Dest.getAddress()->getType());
+ Args.push_back(Dest.getAddress());
+ Constraints += "=*";
+ Constraints += OutputConstraint;
+ }
+
+ if (Info.isReadWrite()) {
+ InOutConstraints += ',';
+
+ const Expr *InputExpr = S.getOutputExpr(i);
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, InOutConstraints);
+
+ if (Info.allowsRegister())
+ InOutConstraints += llvm::utostr(i);
+ else
+ InOutConstraints += OutputConstraint;
+
+ InOutArgTypes.push_back(Arg->getType());
+ InOutArgs.push_back(Arg);
+ }
+ }
+
+ unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ const Expr *InputExpr = S.getInputExpr(i);
+
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // Simplify the input constraint.
+ std::string InputConstraint(S.getInputConstraint(i));
+ InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
+ &OutputConstraintInfos);
+
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+
+ // If this input argument is tied to a larger output result, extend the
+ // input to be the same size as the output. The LLVM backend wants to see
+ // the input and output of a matching constraint be the same size. Note
+ // that GCC does not define what the top bits are here. We use zext because
+ // that is usually cheaper, but LLVM IR should really get an anyext someday.
+ if (Info.hasTiedOperand()) {
+ unsigned Output = Info.getTiedOperand();
+ QualType OutputTy = S.getOutputExpr(Output)->getType();
+ QualType InputTy = InputExpr->getType();
+
+ if (getContext().getTypeSize(OutputTy) >
+ getContext().getTypeSize(InputTy)) {
+ // Use ptrtoint as appropriate so that we can do our extension.
+ if (isa<llvm::PointerType>(Arg->getType()))
+ Arg = Builder.CreatePtrToInt(Arg,
+ llvm::IntegerType::get(LLVMPointerWidth));
+ unsigned OutputSize = (unsigned)getContext().getTypeSize(OutputTy);
+ Arg = Builder.CreateZExt(Arg, llvm::IntegerType::get(OutputSize));
+ }
+ }
+
+ ArgTypes.push_back(Arg->getType());
+ Args.push_back(Arg);
+ Constraints += InputConstraint;
+ }
+
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
+ // Clobbers
+ for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+ std::string Clobber(S.getClobber(i)->getStrData(),
+ S.getClobber(i)->getByteLength());
+
+ Clobber = Target.getNormalizedGCCRegisterName(Clobber.c_str());
+
+ if (i != 0 || NumConstraints != 0)
+ Constraints += ',';
+
+ Constraints += "~{";
+ Constraints += Clobber;
+ Constraints += '}';
+ }
+
+ // Add machine specific clobbers
+ std::string MachineClobbers = Target.getClobbers();
+ if (!MachineClobbers.empty()) {
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
+
+ const llvm::Type *ResultType;
+ if (ResultRegTypes.empty())
+ ResultType = llvm::Type::VoidTy;
+ else if (ResultRegTypes.size() == 1)
+ ResultType = ResultRegTypes[0];
+ else
+ ResultType = llvm::StructType::get(ResultRegTypes);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ResultType, ArgTypes, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, AsmString, Constraints,
+ S.isVolatile() || S.getNumOutputs() == 0);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
+ Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+
+ // Extract all of the register value results from the asm.
+ std::vector<llvm::Value*> RegResults;
+ if (ResultRegTypes.size() == 1) {
+ RegResults.push_back(Result);
+ } else {
+ for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
+ RegResults.push_back(Tmp);
+ }
+ }
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+ const llvm::Type *TruncTy = ResultTruncRegTypes[i];
+ // Truncate the integer result to the right size, note that
+ // ResultTruncRegTypes can be a pointer.
+ uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get((unsigned)ResSize));
+
+ if (Tmp->getType() != TruncTy) {
+ assert(isa<llvm::PointerType>(TruncTy));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ }
+ }
+
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
+ ResultRegQualTys[i]);
+ }
+}
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
new file mode 100644
index 0000000..820e1bd6
--- /dev/null
+++ b/lib/CodeGen/CGValue.h
@@ -0,0 +1,323 @@
+//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around llvm::Value in order to
+// fully represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVALUE_H
+#define CLANG_CODEGEN_CGVALUE_H
+
+#include "clang/AST/Type.h"
+
+namespace llvm {
+ class Constant;
+ class Value;
+}
+
+namespace clang {
+ class ObjCPropertyRefExpr;
+ class ObjCKVCRefExpr;
+
+namespace CodeGen {
+
+/// RValue - This trivial value class is used to represent the result of an
+/// expression that is evaluated. It can be one of three things: either a
+/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
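+///
+/// For example (illustrative): 'x + 1' evaluates to a Scalar RValue holding
+/// a single llvm::Value*; '_Complex double' arithmetic yields a Complex pair
+/// of values; and a struct-typed call result is an Aggregate holding the
+/// address of a temporary.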
+class RValue {
+ llvm::Value *V1, *V2;
+ // TODO: Encode this into the low bit of pointer for more efficient
+ // return-by-value.
+ enum { Scalar, Complex, Aggregate } Flavor;
+
+ bool Volatile:1;
+public:
+
+ bool isScalar() const { return Flavor == Scalar; }
+ bool isComplex() const { return Flavor == Complex; }
+ bool isAggregate() const { return Flavor == Aggregate; }
+
+ bool isVolatileQualified() const { return Volatile; }
+
+ /// getScalar() - Return the Value* of this scalar value.
+ llvm::Value *getScalarVal() const {
+ assert(isScalar() && "Not a scalar!");
+ return V1;
+ }
+
+ /// getComplexVal - Return the real/imag components of this complex value.
+ ///
+ std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
+ return std::pair<llvm::Value *, llvm::Value *>(V1, V2);
+ }
+
+ /// getAggregateAddr() - Return the Value* of the address of the aggregate.
+ llvm::Value *getAggregateAddr() const {
+ assert(isAggregate() && "Not an aggregate!");
+ return V1;
+ }
+
+ static RValue get(llvm::Value *V) {
+ RValue ER;
+ ER.V1 = V;
+ ER.Flavor = Scalar;
+ ER.Volatile = false;
+ return ER;
+ }
+ static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
+ RValue ER;
+ ER.V1 = V1;
+ ER.V2 = V2;
+ ER.Flavor = Complex;
+ ER.Volatile = false;
+ return ER;
+ }
+ static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
+ RValue ER;
+ ER.V1 = C.first;
+ ER.V2 = C.second;
+ ER.Flavor = Complex;
+ ER.Volatile = false;
+ return ER;
+ }
+ // FIXME: Aggregate rvalues need to retain information about whether they are
+ // volatile or not. Remove default to find all places that probably get this
+ // wrong.
+ static RValue getAggregate(llvm::Value *V, bool Vol = false) {
+ RValue ER;
+ ER.V1 = V;
+ ER.Flavor = Aggregate;
+ ER.Volatile = Vol;
+ return ER;
+ }
+};
+
+
+/// LValue - This represents an lvalue reference. Because C/C++ allow
+/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
+/// bitrange.
+class LValue {
+ // FIXME: alignment?
+
+ enum {
+ Simple, // This is a normal l-value, use getAddress().
+ VectorElt, // This is a vector element l-value (V[i]), use getVector*
+ BitField, // This is a bitfield l-value, use getBitfield*.
+    ExtVectorElt, // This is an extended vector subset, use getExtVector*
+ PropertyRef, // This is an Objective-C property reference, use
+ // getPropertyRefExpr
+ KVCRef // This is an objective-c 'implicit' property ref,
+ // use getKVCRefExpr
+ } LVType;
+
+ enum ObjCType {
+ None = 0, // object with no gc attribute.
+ Weak, // __weak object expression
+ Strong // __strong object expression
+ };
+
+ llvm::Value *V;
+
+ union {
+ // Index into a vector subscript: V[i]
+ llvm::Value *VectorIdx;
+
+ // ExtVector element subset: V.xyx
+ llvm::Constant *VectorElts;
+
+ // BitField start bit and size
+ struct {
+ unsigned short StartBit;
+ unsigned short Size;
+ bool IsSigned;
+ } BitfieldData;
+
+ // Obj-C property reference expression
+ const ObjCPropertyRefExpr *PropertyRefExpr;
+ // ObjC 'implicit' property reference expression
+ const ObjCKVCRefExpr *KVCRefExpr;
+ };
+
+ bool Volatile:1;
+ // FIXME: set but never used, what effect should it have?
+ bool Restrict:1;
+
+ // objective-c's ivar
+ bool Ivar:1;
+
+ // LValue is non-gc'able for any reason, including being a parameter or local
+ // variable.
+ bool NonGC: 1;
+
+ // Lvalue is a global reference of an objective-c object
+ bool GlobalObjCRef : 1;
+
+ // objective-c's gc attributes
+ unsigned ObjCType : 2;
+
+
+
+private:
+ static void SetQualifiers(unsigned Qualifiers, LValue& R) {
+ R.Volatile = (Qualifiers&QualType::Volatile)!=0;
+ R.Restrict = (Qualifiers&QualType::Restrict)!=0;
+ // FIXME: Convenient place to set objc flags to 0. This should really be
+ // done in a user-defined constructor instead.
+ R.ObjCType = None;
+ R.Ivar = R.NonGC = R.GlobalObjCRef = false;
+ }
+
+public:
+ bool isSimple() const { return LVType == Simple; }
+ bool isVectorElt() const { return LVType == VectorElt; }
+ bool isBitfield() const { return LVType == BitField; }
+ bool isExtVectorElt() const { return LVType == ExtVectorElt; }
+ bool isPropertyRef() const { return LVType == PropertyRef; }
+ bool isKVCRef() const { return LVType == KVCRef; }
+
+ bool isVolatileQualified() const { return Volatile; }
+ bool isRestrictQualified() const { return Restrict; }
+ unsigned getQualifiers() const {
+ return (Volatile ? QualType::Volatile : 0) |
+ (Restrict ? QualType::Restrict : 0);
+ }
+
+ bool isObjCIvar() const { return Ivar; }
+ bool isNonGC () const { return NonGC; }
+ bool isGlobalObjCRef() const { return GlobalObjCRef; }
+ bool isObjCWeak() const { return ObjCType == Weak; }
+ bool isObjCStrong() const { return ObjCType == Strong; }
+
+ static void SetObjCIvar(LValue& R, bool iValue) {
+ R.Ivar = iValue;
+ }
+
+ static void SetGlobalObjCRef(LValue& R, bool iValue) {
+ R.GlobalObjCRef = iValue;
+ }
+
+ static void SetObjCNonGC(LValue& R, bool iValue) {
+ R.NonGC = iValue;
+ }
+ static void SetObjCType(QualType::GCAttrTypes GCAttrs, LValue& R) {
+ if (GCAttrs == QualType::Weak)
+ R.ObjCType = Weak;
+ else if (GCAttrs == QualType::Strong)
+ R.ObjCType = Strong;
+ else
+ R.ObjCType = None;
+ }
+
+ // simple lvalue
+ llvm::Value *getAddress() const { assert(isSimple()); return V; }
+ // vector elt lvalue
+ llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
+ llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+ // extended vector elements.
+ llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
+ llvm::Constant *getExtVectorElts() const {
+ assert(isExtVectorElt());
+ return VectorElts;
+ }
+ // bitfield lvalue
+ llvm::Value *getBitfieldAddr() const { assert(isBitfield()); return V; }
+ unsigned short getBitfieldStartBit() const {
+ assert(isBitfield());
+ return BitfieldData.StartBit;
+ }
+ unsigned short getBitfieldSize() const {
+ assert(isBitfield());
+ return BitfieldData.Size;
+ }
+ bool isBitfieldSigned() const {
+ assert(isBitfield());
+ return BitfieldData.IsSigned;
+ }
+ // property ref lvalue
+ const ObjCPropertyRefExpr *getPropertyRefExpr() const {
+ assert(isPropertyRef());
+ return PropertyRefExpr;
+ }
+
+ // 'implicit' property ref lvalue
+ const ObjCKVCRefExpr *getKVCRefExpr() const {
+ assert(isKVCRef());
+ return KVCRefExpr;
+ }
+
+ static LValue MakeAddr(llvm::Value *V, unsigned Qualifiers,
+ QualType::GCAttrTypes GCAttrs = QualType::GCNone) {
+ LValue R;
+ R.LVType = Simple;
+ R.V = V;
+ SetQualifiers(Qualifiers,R);
+ SetObjCType(GCAttrs, R);
+ return R;
+ }
+
+ static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
+ unsigned Qualifiers) {
+ LValue R;
+ R.LVType = VectorElt;
+ R.V = Vec;
+ R.VectorIdx = Idx;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+
+ static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
+ unsigned Qualifiers) {
+ LValue R;
+ R.LVType = ExtVectorElt;
+ R.V = Vec;
+ R.VectorElts = Elts;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+
+ static LValue MakeBitfield(llvm::Value *V, unsigned short StartBit,
+ unsigned short Size, bool IsSigned,
+ unsigned Qualifiers) {
+ LValue R;
+ R.LVType = BitField;
+ R.V = V;
+ R.BitfieldData.StartBit = StartBit;
+ R.BitfieldData.Size = Size;
+ R.BitfieldData.IsSigned = IsSigned;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+
+ // FIXME: It is probably bad that we aren't emitting the target when we build
+ // the lvalue. However, this complicates the code a bit, and I haven't figured
+ // out how to make it go wrong yet.
+ static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
+ unsigned Qualifiers) {
+ LValue R;
+ R.LVType = PropertyRef;
+ R.PropertyRefExpr = E;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+
+ static LValue MakeKVCRef(const ObjCKVCRefExpr *E, unsigned Qualifiers) {
+ LValue R;
+ R.LVType = KVCRef;
+ R.KVCRefExpr = E;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+};
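+
+// A brief usage sketch of the factories above. Addr, Quals, Val, and Builder
+// are hypothetical stand-ins for values the calling codegen routine would
+// already have:
+//
+//   LValue LV = LValue::MakeAddr(Addr, Quals);    // ordinary pointer lvalue
+//   if (LV.isSimple())
+//     Builder.CreateStore(Val, LV.getAddress());  // a plain store suffices
+//   else if (LV.isBitfield()) {
+//     // Bitfield stores must read-modify-write using the encoded range.
+//     unsigned short Start = LV.getBitfieldStartBit();
+//     unsigned short Size = LV.getBitfieldSize();
+//     // ...masking and shifting elided...
+//   }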
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
new file mode 100644
index 0000000..d6c46a8
--- /dev/null
+++ b/lib/CodeGen/CMakeLists.txt
@@ -0,0 +1,24 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangCodeGen
+  CGBlocks.cpp
+  CGBuiltin.cpp
+  CGCall.cpp
+  CGCXX.cpp
+  CGDebugInfo.cpp
+  CGDecl.cpp
+  CGExpr.cpp
+  CGExprAgg.cpp
+  CGExprComplex.cpp
+  CGExprConstant.cpp
+  CGExprScalar.cpp
+ CGObjC.cpp
+ CGObjCGNU.cpp
+ CGObjCMac.cpp
+ CGStmt.cpp
+ CodeGenFunction.cpp
+ CodeGenModule.cpp
+ CodeGenTypes.cpp
+ Mangle.cpp
+ ModuleBuilder.cpp
+ )
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
new file mode 100644
index 0000000..672f6da
--- /dev/null
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -0,0 +1,714 @@
+//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-function state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+ : BlockFunction(cgm, *this, Builder), CGM(cgm),
+ Target(CGM.getContext().Target),
+ DebugInfo(0), SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+ CXXThisDecl(0) {
+ LLVMIntTy = ConvertType(getContext().IntTy);
+ LLVMPointerWidth = Target.getPointerWidth(0);
+}
+
+ASTContext &CodeGenFunction::getContext() const {
+ return CGM.getContext();
+}
+
+
+llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
+ llvm::BasicBlock *&BB = LabelMap[S];
+ if (BB) return BB;
+
+ // Create, but don't insert, the new block.
+ return BB = createBasicBlock(S->getName());
+}
+
+llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
+ llvm::Value *Res = LocalDeclMap[VD];
+ assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return Res;
+}
+
+llvm::Constant *
+CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
+ return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
+}
+
+const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+ return CGM.getTypes().ConvertTypeForMem(T);
+}
+
+const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+}
+
+bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
+ // FIXME: Use positive checks instead of negative ones to be more robust in
+ // the face of extension.
+  return !T->hasPointerRepresentation() && !T->isRealType() &&
+ !T->isVoidType() && !T->isVectorType() && !T->isFunctionType() &&
+ !T->isBlockPointerType();
+}
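+
+// For orientation, this is roughly how the checks above classify a few common
+// C types (derived from the predicates used; not an exhaustive list):
+//   int, float, int*      -> false (real type or pointer representation)
+//   void, vector types    -> false (explicitly excluded)
+//   struct { int a, b; }  -> true  (handled as an aggregate)
+//   _Complex double       -> true  (complex is not a "real" type here)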
+
+void CodeGenFunction::EmitReturnBlock() {
+ // For cleanliness, we try to avoid emitting the return block for
+ // simple cases.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (CurBB) {
+ assert(!CurBB->getTerminator() && "Unexpected terminated block.");
+
+ // We have a valid insert point, reuse it if there are no explicit
+ // jumps to the return block.
+ if (ReturnBlock->use_empty())
+ delete ReturnBlock;
+ else
+ EmitBlock(ReturnBlock);
+ return;
+ }
+
+ // Otherwise, if the return block is the target of a single direct
+ // branch then we can just put the code in that block instead. This
+ // cleans up functions which started with a unified return block.
+ if (ReturnBlock->hasOneUse()) {
+ llvm::BranchInst *BI =
+ dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
+ if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
+ // Reset insertion point and delete the branch.
+ Builder.SetInsertPoint(BI->getParent());
+ BI->eraseFromParent();
+ delete ReturnBlock;
+ return;
+ }
+ }
+
+ // FIXME: We are at an unreachable point, there is no reason to emit the block
+ // unless it has uses. However, we still need a place to put the debug
+ // region.end for now.
+
+ EmitBlock(ReturnBlock);
+}
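+
+// To illustrate the two fast paths above with assumed C inputs: a single
+// return yields one unconditional branch, so the hasOneUse path folds the
+// return block into its predecessor; multiple returns keep the unified block.
+//
+//   int one_exit(int x) { return x + 1; }  // return block folded away
+//
+//   int two_exits(int x) {
+//     if (x) return 1;  // two branches target the return block,
+//     return 0;         // so it must actually be emitted
+//   }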
+
+void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
+ // Finish emission of indirect switches.
+ EmitIndirectSwitches();
+
+ assert(BreakContinueStack.empty() &&
+ "mismatched push/pop in break/continue stack!");
+ assert(BlockScopes.empty() &&
+ "did not remove all blocks from block scope map!");
+ assert(CleanupEntries.empty() &&
+ "mismatched push/pop in cleanup stack!");
+
+ // Emit function epilog (to return).
+ EmitReturnBlock();
+
+ // Emit debug descriptor for function end.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(EndLoc);
+ DI->EmitRegionEnd(CurFn, Builder);
+ }
+
+ EmitFunctionEpilog(*CurFnInfo, ReturnValue);
+
+ // Remove the AllocaInsertPt instruction, which is just a convenience for us.
+ llvm::Instruction *Ptr = AllocaInsertPt;
+ AllocaInsertPt = 0;
+ Ptr->eraseFromParent();
+}
+
+void CodeGenFunction::StartFunction(const Decl *D, QualType RetTy,
+ llvm::Function *Fn,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc) {
+ DidCallStackSave = false;
+ CurCodeDecl = CurFuncDecl = D;
+ FnRetTy = RetTy;
+ CurFn = Fn;
+ assert(CurFn->isDeclaration() && "Function already has body?");
+
+ llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
+
+  // Create a marker to make it easy to insert allocas into the entry block
+ // later. Don't create this with the builder, because we don't want it
+ // folded.
+ llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::Int32Ty);
+ AllocaInsertPt = new llvm::BitCastInst(Undef, llvm::Type::Int32Ty, "",
+ EntryBB);
+ if (Builder.isNamePreserving())
+ AllocaInsertPt->setName("allocapt");
+
+ ReturnBlock = createBasicBlock("return");
+ ReturnValue = 0;
+ if (!RetTy->isVoidType())
+ ReturnValue = CreateTempAlloca(ConvertType(RetTy), "retval");
+
+ Builder.SetInsertPoint(EntryBB);
+
+ // Emit subprogram debug descriptor.
+ // FIXME: The cast here is a huge hack.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(StartLoc);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ DI->EmitFunctionStart(CGM.getMangledName(FD), RetTy, CurFn, Builder);
+ } else {
+ // Just use LLVM function name.
+ DI->EmitFunctionStart(Fn->getName().c_str(),
+ RetTy, CurFn, Builder);
+ }
+ }
+
+ // FIXME: Leaked.
+ CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args);
+ EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+
+ // If any of the arguments have a variably modified type, make sure to
+ // emit the type size.
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ QualType Ty = i->second;
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ }
+}
+
+void CodeGenFunction::GenerateCode(const FunctionDecl *FD,
+ llvm::Function *Fn) {
+ // Check if we should generate debug info for this function.
+ if (CGM.getDebugInfo() && !FD->hasAttr<NodebugAttr>())
+ DebugInfo = CGM.getDebugInfo();
+
+ FunctionArgList Args;
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isInstance()) {
+ // Create the implicit 'this' decl.
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0, SourceLocation(),
+ &getContext().Idents.get("this"),
+ MD->getThisType(getContext()));
+ Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
+ }
+ }
+
+ if (FD->getNumParams()) {
+    const FunctionProtoType *FProto = FD->getType()->getAsFunctionProtoType();
+ assert(FProto && "Function def must have prototype!");
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+ Args.push_back(std::make_pair(FD->getParamDecl(i),
+ FProto->getArgType(i)));
+ }
+
+ // FIXME: Support CXXTryStmt here, too.
+ if (const CompoundStmt *S = FD->getCompoundBody(getContext())) {
+ StartFunction(FD, FD->getResultType(), Fn, Args, S->getLBracLoc());
+ EmitStmt(S);
+ FinishFunction(S->getRBracLoc());
+ }
+
+ // Destroy the 'this' declaration.
+ if (CXXThisDecl)
+ CXXThisDecl->Destroy(getContext());
+}
+
+/// ContainsLabel - Return true if the statement contains a label in it. If
+/// this statement is not executed normally, and it does not contain a label,
+/// we can just remove the code.
+bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
+ // Null statement, not a label!
+ if (S == 0) return false;
+
+ // If this is a label, we have to emit the code, consider something like:
+ // if (0) { ... foo: bar(); } goto foo;
+ if (isa<LabelStmt>(S))
+ return true;
+
+ // If this is a case/default statement, and we haven't seen a switch, we have
+ // to emit the code.
+ if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
+ return true;
+
+ // If this is a switch statement, we want to ignore cases below it.
+ if (isa<SwitchStmt>(S))
+ IgnoreCaseStmts = true;
+
+ // Scan subexpressions for verboten labels.
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I != E; ++I)
+ if (ContainsLabel(*I, IgnoreCaseStmts))
+ return true;
+
+ return false;
+}
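+
+// A small assumed example of the situation described above; the "dead" body
+// cannot be dropped because the label inside it is reachable:
+//
+//   extern void g(void);
+//   void f(void) {
+//     if (0) {   // condition folds to false, but the body...
+//     skip:      // ...contains a label targeted from below, so
+//       g();     // ContainsLabel returns true and it is emitted
+//     }
+//     goto skip;
+//   }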
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
+/// a constant, or if it does but contains a label, return 0. If it constant
+/// folds to 'true' and does not contain a label, return 1; if it constant
+/// folds to 'false' and does not contain a label, return -1.
+int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
+ // FIXME: Rename and handle conversion of other evaluatable things
+ // to bool.
+ Expr::EvalResult Result;
+ if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
+ Result.HasSideEffects)
+ return 0; // Not foldable, not integer or not fully evaluatable.
+
+ if (CodeGenFunction::ContainsLabel(Cond))
+ return 0; // Contains a label.
+
+ return Result.Val.getInt().getBoolValue() ? 1 : -1;
+}
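+
+// Sketch of the three possible results, for hypothetical Expr nodes built
+// from the C conditions shown (CGF is an assumed CodeGenFunction):
+//   CGF.ConstantFoldsToSimpleInteger(/* "1 + 1" */)  ==  1  (folds to true)
+//   CGF.ConstantFoldsToSimpleInteger(/* "0 * 5" */)  == -1  (folds to false)
+//   CGF.ConstantFoldsToSimpleInteger(/* "x > 0" */)  ==  0  (not constant)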
+
+
+/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
+/// statement) to the specified blocks. Based on the condition, this might try
+/// to simplify the codegen of the conditional based on the branch.
+///
+void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
+ llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
+ return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
+
+ if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+ // Handle X && Y in a condition.
+ if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
+ // If we have "1 && X", simplify the code. "0 && X" would have constant
+ // folded if the case was simple enough.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
+ // br(1 && X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X && 1", simplify the code to use an uncond branch.
+ // "X && 0" would have been constant folded to 0.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
+ // br(X && 1) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is false, we
+ // want to jump to the FalseBlock.
+ llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
+ EmitBlock(LHSTrue);
+
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ return;
+ } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
+ // If we have "0 || X", simplify the code. "1 || X" would have constant
+ // folded if the case was simple enough.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
+ // br(0 || X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X || 0", simplify the code to use an uncond branch.
+ // "X || 1" would have been constant folded to 1.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
+ // br(X || 0) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is true, we
+ // want to jump to the TrueBlock.
+ llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
+ EmitBlock(LHSFalse);
+
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ return;
+ }
+ }
+
+ if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
+ // br(!x, t, f) -> br(x, f, t)
+ if (CondUOp->getOpcode() == UnaryOperator::LNot)
+ return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
+ }
+
+ if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
+ // Handle ?: operator.
+
+ // Just ignore GNU ?: extension.
+ if (CondOp->getLHS()) {
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+ EmitBlock(LHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ return;
+ }
+ }
+
+ // Emit the code with the fully general case.
+ llvm::Value *CondV = EvaluateExprAsBool(Cond);
+ Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
+}
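+
+// For a condition like "a && b" with neither side constant, the recursion
+// above produces roughly this control flow (block names match the
+// createBasicBlock calls; a sketch, not exact IR):
+//
+//   entry:          br i1 %a, label %land.lhs.true, label %if.else
+//   land.lhs.true:  br i1 %b, label %if.then, label %if.else
+//
+// A false 'a' skips evaluating 'b' entirely, matching C's short-circuit
+// semantics.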
+
+/// getCGRecordLayout - Return record layout info.
+const CGRecordLayout *CodeGenFunction::getCGRecordLayout(CodeGenTypes &CGT,
+ QualType Ty) {
+ const RecordType *RTy = Ty->getAsRecordType();
+  assert(RTy && "Unexpected type. RecordType expected here.");
+
+ return CGT.getCGRecordLayout(RTy->getDecl());
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ CGM.ErrorUnsupported(S, Type, OmitOnError);
+}
+
+unsigned CodeGenFunction::GetIDForAddrOfLabel(const LabelStmt *L) {
+ // Use LabelIDs.size() as the new ID if one hasn't been assigned.
+ return LabelIDs.insert(std::make_pair(L, LabelIDs.size())).first->second;
+}
+
+void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+
+ // Get size and alignment info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+ // Don't bother emitting a zero-byte memset.
+ if (TypeInfo.first == 0)
+ return;
+
+ // FIXME: Handle variable sized types.
+ const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
+
+ Builder.CreateCall4(CGM.getMemSetFn(), DestPtr,
+ llvm::ConstantInt::getNullValue(llvm::Type::Int8Ty),
+ // TypeInfo.first describes size in bits.
+ llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ TypeInfo.second/8));
+}
+
+void CodeGenFunction::EmitIndirectSwitches() {
+ llvm::BasicBlock *Default;
+
+ if (IndirectSwitches.empty())
+ return;
+
+ if (!LabelIDs.empty()) {
+ Default = getBasicBlockForLabel(LabelIDs.begin()->first);
+ } else {
+ // No possible targets for indirect goto, just emit an infinite
+ // loop.
+ Default = createBasicBlock("indirectgoto.loop", CurFn);
+ llvm::BranchInst::Create(Default, Default);
+ }
+
+ for (std::vector<llvm::SwitchInst*>::iterator i = IndirectSwitches.begin(),
+ e = IndirectSwitches.end(); i != e; ++i) {
+ llvm::SwitchInst *I = *i;
+
+ I->setSuccessor(0, Default);
+ for (std::map<const LabelStmt*,unsigned>::iterator LI = LabelIDs.begin(),
+ LE = LabelIDs.end(); LI != LE; ++LI) {
+ I->addCase(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ LI->second),
+ getBasicBlockForLabel(LI->first));
+ }
+ }
+}
+
+llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
+ llvm::Value *&SizeEntry = VLASizeMap[VAT];
+
+ assert(SizeEntry && "Did not emit size for type");
+ return SizeEntry;
+}
+
+llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
+ assert(Ty->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
+ llvm::Value *&SizeEntry = VLASizeMap[VAT];
+
+ if (!SizeEntry) {
+      // Get the element size.
+ llvm::Value *ElemSize;
+
+ QualType ElemTy = VAT->getElementType();
+
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ if (ElemTy->isVariableArrayType())
+ ElemSize = EmitVLASize(ElemTy);
+ else {
+ ElemSize = llvm::ConstantInt::get(SizeTy,
+ getContext().getTypeSize(ElemTy) / 8);
+ }
+
+ llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
+ NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
+
+ SizeEntry = Builder.CreateMul(ElemSize, NumElements);
+ }
+
+ return SizeEntry;
+ } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ EmitVLASize(AT->getElementType());
+ } else if (const PointerType *PT = Ty->getAsPointerType())
+ EmitVLASize(PT->getPointeeType());
+ else {
+ assert(0 && "unknown VM type!");
+ }
+
+ return 0;
+}
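+
+// Worked example of the recursion above, assuming 32-bit int: the constant
+// inner dimension folds while the outer one becomes a runtime multiply.
+//
+//   void f(unsigned n) {
+//     int a[n][10];
+//     // int[10] is not variably modified, so its size folds to 10*4 = 40
+//     // bytes; the VLA size is then (size_t)n * 40, computed at runtime
+//     // and cached in VLASizeMap for later lookups.
+//   }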
+
+llvm::Value *CodeGenFunction::EmitVAListRef(const Expr *E) {
+ if (CGM.getContext().getBuiltinVaListType()->isArrayType()) {
+ return EmitScalarExpr(E);
+ }
+ return EmitLValue(E).getAddress();
+}
+
+void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupBlock) {
+ CleanupEntries.push_back(CleanupEntry(CleanupBlock));
+}
+
+void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize) {
+ assert(CleanupEntries.size() >= OldCleanupStackSize &&
+ "Cleanup stack mismatch!");
+
+ while (CleanupEntries.size() > OldCleanupStackSize)
+ EmitCleanupBlock();
+}
+
+CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
+ CleanupEntry &CE = CleanupEntries.back();
+
+ llvm::BasicBlock *CleanupBlock = CE.CleanupBlock;
+
+ std::vector<llvm::BasicBlock *> Blocks;
+ std::swap(Blocks, CE.Blocks);
+
+ std::vector<llvm::BranchInst *> BranchFixups;
+ std::swap(BranchFixups, CE.BranchFixups);
+
+ CleanupEntries.pop_back();
+
+ // Check if any branch fixups pointed to the scope we just popped. If so,
+ // we can remove them.
+ for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
+ llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
+ BlockScopeMap::iterator I = BlockScopes.find(Dest);
+
+ if (I == BlockScopes.end())
+ continue;
+
+ assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");
+
+ if (I->second == CleanupEntries.size()) {
+ // We don't need to do this branch fixup.
+ BranchFixups[i] = BranchFixups.back();
+ BranchFixups.pop_back();
+ i--;
+ e--;
+ continue;
+ }
+ }
+
+ llvm::BasicBlock *SwitchBlock = 0;
+ llvm::BasicBlock *EndBlock = 0;
+ if (!BranchFixups.empty()) {
+ SwitchBlock = createBasicBlock("cleanup.switch");
+ EndBlock = createBasicBlock("cleanup.end");
+
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ Builder.SetInsertPoint(SwitchBlock);
+
+ llvm::Value *DestCodePtr = CreateTempAlloca(llvm::Type::Int32Ty,
+ "cleanup.dst");
+ llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+
+ // Create a switch instruction to determine where to jump next.
+ llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
+ BranchFixups.size());
+
+ // Restore the current basic block (if any)
+ if (CurBB) {
+ Builder.SetInsertPoint(CurBB);
+
+ // If we had a current basic block, we also need to emit an instruction
+ // to initialize the cleanup destination.
+ Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::Int32Ty),
+ DestCodePtr);
+ } else
+ Builder.ClearInsertionPoint();
+
+ for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
+ llvm::BranchInst *BI = BranchFixups[i];
+ llvm::BasicBlock *Dest = BI->getSuccessor(0);
+
+ // Fixup the branch instruction to point to the cleanup block.
+ BI->setSuccessor(0, CleanupBlock);
+
+ if (CleanupEntries.empty()) {
+ llvm::ConstantInt *ID;
+
+ // Check if we already have a destination for this block.
+ if (Dest == SI->getDefaultDest())
+ ID = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ else {
+ ID = SI->findCaseDest(Dest);
+ if (!ID) {
+            // No case found for this destination; create a new unique ID by
+            // using the number of switch successors.
+ ID = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ SI->getNumSuccessors());
+ SI->addCase(ID, Dest);
+ }
+ }
+
+ // Store the jump destination before the branch instruction.
+ new llvm::StoreInst(ID, DestCodePtr, BI);
+ } else {
+ // We need to jump through another cleanup block. Create a pad block
+ // with a branch instruction that jumps to the final destination and
+ // add it as a branch fixup to the current cleanup scope.
+
+ // Create the pad block.
+ llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
+
+ // Create a unique case ID.
+ llvm::ConstantInt *ID = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ SI->getNumSuccessors());
+
+ // Store the jump destination before the branch instruction.
+ new llvm::StoreInst(ID, DestCodePtr, BI);
+
+ // Add it as the destination.
+ SI->addCase(ID, CleanupPad);
+
+ // Create the branch to the final destination.
+ llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
+ CleanupPad->getInstList().push_back(BI);
+
+ // And add it as a branch fixup.
+ CleanupEntries.back().BranchFixups.push_back(BI);
+ }
+ }
+ }
+
+ // Remove all blocks from the block scope map.
+ for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
+ assert(BlockScopes.count(Blocks[i]) &&
+ "Did not find block in scope map!");
+
+ BlockScopes.erase(Blocks[i]);
+ }
+
+ return CleanupBlockInfo(CleanupBlock, SwitchBlock, EndBlock);
+}
+
+void CodeGenFunction::EmitCleanupBlock() {
+ CleanupBlockInfo Info = PopCleanupBlock();
+
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+ if (CurBB && !CurBB->getTerminator() &&
+ Info.CleanupBlock->getNumUses() == 0) {
+ CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
+ delete Info.CleanupBlock;
+ } else
+ EmitBlock(Info.CleanupBlock);
+
+ if (Info.SwitchBlock)
+ EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ EmitBlock(Info.EndBlock);
+}
+
+void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI) {
+ assert(!CleanupEntries.empty() &&
+ "Trying to add branch fixup without cleanup block!");
+
+ // FIXME: We could be more clever here and check if there's already a branch
+ // fixup for this destination and recycle it.
+ CleanupEntries.back().BranchFixups.push_back(BI);
+}
+
+void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest) {
+ if (!HaveInsertPoint())
+ return;
+
+ llvm::BranchInst* BI = Builder.CreateBr(Dest);
+
+ Builder.ClearInsertionPoint();
+
+ // The stack is empty, no need to do any cleanup.
+ if (CleanupEntries.empty())
+ return;
+
+ if (!Dest->getParent()) {
+ // We are trying to branch to a block that hasn't been inserted yet.
+ AddBranchFixup(BI);
+ return;
+ }
+
+ BlockScopeMap::iterator I = BlockScopes.find(Dest);
+ if (I == BlockScopes.end()) {
+ // We are trying to jump to a block that is outside of any cleanup scope.
+ AddBranchFixup(BI);
+ return;
+ }
+
+ assert(I->second < CleanupEntries.size() &&
+ "Trying to branch into cleanup region");
+
+ if (I->second == CleanupEntries.size() - 1) {
+ // We have a branch to a block in the same scope.
+ return;
+ }
+
+ AddBranchFixup(BI);
+}
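+
+// A sketch of C input that exercises the fixup path above, assuming (per
+// DidCallStackSave elsewhere in this patch) that a VLA scope pushes a
+// stack-restore cleanup:
+//
+//   void f(unsigned n) {
+//     {
+//       int vla[n];  // entering the scope pushes a cleanup entry
+//       if (n > 8)
+//         goto out;  // 'out' has no parent block yet, so the branch is
+//     }              // recorded via AddBranchFixup and later rewired to
+//   out:             // run through the cleanup switch before reaching it
+//     return;
+//   }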
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
new file mode 100644
index 0000000..b7894a4
--- /dev/null
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -0,0 +1,900 @@
+//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-function state used for LLVM translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
+#define CLANG_CODEGEN_CODEGENFUNCTION_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ValueHandle.h"
+#include <map>
+#include "CGBlocks.h"
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class BasicBlock;
+ class Module;
+ class SwitchInst;
+ class Value;
+}
+
+namespace clang {
+ class ASTContext;
+ class CXXDestructorDecl;
+ class Decl;
+ class EnumConstantDecl;
+ class FunctionDecl;
+ class FunctionProtoType;
+ class LabelStmt;
+ class ObjCContainerDecl;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCPropertyImplDecl;
+ class TargetInfo;
+ class VarDecl;
+ class ObjCForCollectionStmt;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CodeGenTypes;
+ class CGDebugInfo;
+ class CGFunctionInfo;
+ class CGRecordLayout;
+
+/// CodeGenFunction - This class organizes the per-function state that is used
+/// while generating LLVM code.
+class CodeGenFunction : public BlockFunction {
+ CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+public:
+ CodeGenModule &CGM; // Per-module state.
+ TargetInfo &Target;
+
+ typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
+ CGBuilderTy Builder;
+
+ /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
+ /// This excludes BlockDecls.
+ const Decl *CurFuncDecl;
+ /// CurCodeDecl - This is the inner-most code context, which includes blocks.
+ const Decl *CurCodeDecl;
+ const CGFunctionInfo *CurFnInfo;
+ QualType FnRetTy;
+ llvm::Function *CurFn;
+
+ /// ReturnBlock - Unified return block.
+ llvm::BasicBlock *ReturnBlock;
+ /// ReturnValue - The temporary alloca to hold the return value. This is null
+ /// iff the function has no return value.
+ llvm::Instruction *ReturnValue;
+
+  /// AllocaInsertPt - This is an instruction in the entry block before which
+ /// we prefer to insert allocas.
+ llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+
+ const llvm::Type *LLVMIntTy;
+ uint32_t LLVMPointerWidth;
+
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+
+ /// PushCleanupBlock - Push a new cleanup entry on the stack and set the
+ /// passed in block as the cleanup block.
+ void PushCleanupBlock(llvm::BasicBlock *CleanupBlock);
+
+ /// CleanupBlockInfo - A struct representing a popped cleanup block.
+ struct CleanupBlockInfo {
+ /// CleanupBlock - the cleanup block
+ llvm::BasicBlock *CleanupBlock;
+
+ /// SwitchBlock - the block (if any) containing the switch instruction used
+ /// for jumping to the final destination.
+ llvm::BasicBlock *SwitchBlock;
+
+ /// EndBlock - the default destination for the switch instruction.
+ llvm::BasicBlock *EndBlock;
+
+ CleanupBlockInfo(llvm::BasicBlock *cb, llvm::BasicBlock *sb,
+ llvm::BasicBlock *eb)
+ : CleanupBlock(cb), SwitchBlock(sb), EndBlock(eb) {}
+ };
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack, process all
+ /// branch fixups and return a block info struct with the switch block and end
+ /// block.
+ CleanupBlockInfo PopCleanupBlock();
+
+ /// CleanupScope - RAII object that will create a cleanup block and set the
+ /// insert point to that block. When destructed, it sets the insert point to
+ /// the previous block and pushes a new cleanup entry on the stack.
+ class CleanupScope {
+ CodeGenFunction& CGF;
+ llvm::BasicBlock *CurBB;
+ llvm::BasicBlock *CleanupBB;
+
+ public:
+ CleanupScope(CodeGenFunction &cgf)
+ : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()) {
+ CleanupBB = CGF.createBasicBlock("cleanup");
+ CGF.Builder.SetInsertPoint(CleanupBB);
+ }
+
+ ~CleanupScope() {
+ CGF.PushCleanupBlock(CleanupBB);
+ CGF.Builder.SetInsertPoint(CurBB);
+ }
+ };
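+
+  // Hypothetical usage sketch of this RAII helper inside a CodeGenFunction
+  // member (EmitSomeCleanupIR is a placeholder, not a real method):
+  //
+  //   {
+  //     CleanupScope Scope(*this);  // builder now targets a new "cleanup"
+  //     EmitSomeCleanupIR();        // block; emit the unwind-time IR here
+  //   }                             // destructor pushes the block and
+  //                                 // restores the old insertion point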
+
+ /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup
+ /// blocks that have been added.
+ void EmitCleanupBlocks(size_t OldCleanupStackSize);
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert block
+ /// through the cleanup handling code (if any) and then on to \arg Dest.
+ ///
+ /// FIXME: Maybe this should really be in EmitBranch? Don't we always want
+ /// this behavior for branches?
+ void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
+
+private:
+ CGDebugInfo* DebugInfo;
+
+ /// LabelIDs - Track arbitrary ids assigned to labels for use in implementing
+ /// the GCC address-of-label extension and indirect goto. IDs are assigned to
+  /// labels inside GetIDForAddrOfLabel().
+ std::map<const LabelStmt*, unsigned> LabelIDs;
+
+ /// IndirectSwitches - Record the list of switches for indirect
+ /// gotos. Emission of the actual switching code needs to be delayed until all
+ /// AddrLabelExprs have been seen.
+ std::vector<llvm::SwitchInst*> IndirectSwitches;
+
+ /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
+ /// decls.
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+
+ /// LabelMap - This keeps track of the LLVM basic block for each C label.
+ llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap;
+
+ // BreakContinueStack - This keeps track of where break and continue
+ // statements should jump to.
+ struct BreakContinue {
+ BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb)
+ : BreakBlock(bb), ContinueBlock(cb) {}
+
+ llvm::BasicBlock *BreakBlock;
+ llvm::BasicBlock *ContinueBlock;
+ };
+ llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
+
+  /// SwitchInsn - This is the nearest current switch instruction. It is null
+  /// if the current context is not in a switch.
+ llvm::SwitchInst *SwitchInsn;
+
+  /// CaseRangeBlock - This block holds the if-condition check for the last
+  /// case statement range in the current switch instruction.
+ llvm::BasicBlock *CaseRangeBlock;
+
+ /// InvokeDest - This is the nearest exception target for calls
+ /// which can unwind, when exceptions are being used.
+ llvm::BasicBlock *InvokeDest;
+
+ // VLASizeMap - This keeps track of the associated size for each VLA type.
+ // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+ // enter/leave scopes.
+ llvm::DenseMap<const VariableArrayType*, llvm::Value*> VLASizeMap;
+
+ /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+ /// calling llvm.stacksave for multiple VLAs in the same scope.
+ bool DidCallStackSave;
+
+ struct CleanupEntry {
+ /// CleanupBlock - The block of code that does the actual cleanup.
+ llvm::BasicBlock *CleanupBlock;
+
+ /// Blocks - Basic blocks that were emitted in the current cleanup scope.
+ std::vector<llvm::BasicBlock *> Blocks;
+
+ /// BranchFixups - Branch instructions to basic blocks that haven't been
+ /// inserted into the current function yet.
+ std::vector<llvm::BranchInst *> BranchFixups;
+
+ explicit CleanupEntry(llvm::BasicBlock *cb)
+ : CleanupBlock(cb) {}
+ };
+
+ /// CleanupEntries - Stack of cleanup entries.
+ llvm::SmallVector<CleanupEntry, 8> CleanupEntries;
+
+ typedef llvm::DenseMap<llvm::BasicBlock*, size_t> BlockScopeMap;
+
+  /// BlockScopes - Map of the "cleanup scope" that each basic block belongs to.
+ BlockScopeMap BlockScopes;
+
+  /// CXXThisDecl - When parsing a C++ function, this will hold the implicit
+ /// 'this' declaration.
+ ImplicitParamDecl *CXXThisDecl;
+
+ llvm::SmallVector<const CXXTemporary*, 4> LiveTemporaries;
+
+public:
+ CodeGenFunction(CodeGenModule &cgm);
+
+ ASTContext &getContext() const;
+ CGDebugInfo *getDebugInfo() { return DebugInfo; }
+
+ llvm::BasicBlock *getInvokeDest() { return InvokeDest; }
+ void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; }
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C
+ //===--------------------------------------------------------------------===//
+
+ void GenerateObjCMethod(const ObjCMethodDecl *OMD);
+
+ void StartObjCMethod(const ObjCMethodDecl *MD,
+ const ObjCContainerDecl *CD);
+
+ /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
+ void GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+
+ /// GenerateObjCSetter - Synthesize an Objective-C property setter function
+ /// for the given property.
+ void GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+
+ //===--------------------------------------------------------------------===//
+ // Block Bits
+ //===--------------------------------------------------------------------===//
+
+ llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
+ llvm::Constant *BuildDescriptorBlockDecl(bool BlockHasCopyDispose,
+ uint64_t Size,
+ const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Function *GenerateBlockFunction(const BlockExpr *BExpr,
+ const BlockInfo& Info,
+ const Decl *OuterFuncDecl,
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm,
+ uint64_t &Size, uint64_t &Align,
+ llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
+ bool &subBlockHasCopyDispose);
+
+ void BlockForwardSelf();
+ llvm::Value *LoadBlockStruct();
+
+ llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E);
+
+ const llvm::Type *BuildByRefType(QualType Ty, uint64_t Align);
+
+ void GenerateCode(const FunctionDecl *FD,
+ llvm::Function *Fn);
+ void StartFunction(const Decl *D, QualType RetTy,
+ llvm::Function *Fn,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc);
+
+ /// EmitReturnBlock - Emit the unified return block, trying to avoid its
+ /// emission when possible.
+ void EmitReturnBlock();
+
+ /// FinishFunction - Complete IR generation of the current function. It is
+ /// legal to call this function even if there is no current insertion point.
+ void FinishFunction(SourceLocation EndLoc=SourceLocation());
+
+ /// EmitFunctionProlog - Emit the target specific LLVM code to load the
+ /// arguments for the given function. This is also responsible for naming the
+ /// LLVM function arguments.
+ void EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args);
+
+ /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
+ /// given temporary.
+ void EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Value *ReturnValue);
+
+ const llvm::Type *ConvertTypeForMem(QualType T);
+ const llvm::Type *ConvertType(QualType T);
+
+ /// LoadObjCSelf - Load the value of self. This function is only valid while
+ /// generating code for an Objective-C method.
+ llvm::Value *LoadObjCSelf();
+
+ /// TypeOfSelfObject - Return type of object that this self represents.
+ QualType TypeOfSelfObject();
+
+ /// hasAggregateLLVMType - Return true if the specified AST type will map into
+ /// an aggregate LLVM type or is void.
+ static bool hasAggregateLLVMType(QualType T);
+
+ /// createBasicBlock - Create an LLVM basic block.
+ llvm::BasicBlock *createBasicBlock(const char *Name="",
+ llvm::Function *Parent=0,
+ llvm::BasicBlock *InsertBefore=0) {
+#ifdef NDEBUG
+ return llvm::BasicBlock::Create("", Parent, InsertBefore);
+#else
+ return llvm::BasicBlock::Create(Name, Parent, InsertBefore);
+#endif
+ }
+
+ /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
+ /// label maps to.
+ llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S);
+
+ /// SimplifyForwardingBlocks - If the given basic block is only a
+ /// branch to another basic block, simplify it. This assumes that no
+ /// other code could potentially reference the basic block.
+ void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
+
+ /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
+ /// adding a fall-through branch from the current insert block if
+ /// necessary. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// IsFinished - If true, indicates that the caller has finished emitting
+ /// branches to the given block and does not expect to emit code into it. This
+ /// means the block can be ignored if it is unreachable.
+ void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+
+ /// EmitBranch - Emit a branch to the specified basic block from the current
+ /// insert block, taking care to avoid creation of branches from dummy
+ /// blocks. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// This function clears the current insertion point. The caller should follow
+  /// calls to this function with calls to Emit*Block prior to generating new
+ /// code.
+ void EmitBranch(llvm::BasicBlock *Block);
+
+ /// HaveInsertPoint - True if an insertion point is defined. If not, this
+ /// indicates that the current code being emitted is unreachable.
+ bool HaveInsertPoint() const {
+ return Builder.GetInsertBlock() != 0;
+ }
+
+ /// EnsureInsertPoint - Ensure that an insertion point is defined so that
+ /// emitted IR has a place to go. Note that by definition, if this function
+ /// creates a block then that block is unreachable; callers may do better to
+ /// detect when no insertion point is defined and simply skip IR generation.
+ void EnsureInsertPoint() {
+ if (!HaveInsertPoint())
+ EmitBlock(createBasicBlock());
+ }
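+
+  // Intended usage pattern, sketched with placeholder follow-on emission
+  // (S, V, and Addr assumed):
+  //
+  //   EmitStmt(S);                   // may end in a terminator and clear
+  //                                  // the insertion point
+  //   EnsureInsertPoint();           // guarantee the builder has a block,
+  //                                  // even if that block is unreachable
+  //   Builder.CreateStore(V, Addr);  // code that must still be emitted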
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ //===--------------------------------------------------------------------===//
+ // Helpers
+ //===--------------------------------------------------------------------===//
+
+  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block.
+ llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
+ const char *Name = "tmp");
+
+ /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+ /// expression and compare the result against zero, returning an Int1Ty value.
+ llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+ /// EmitAnyExpr - Emit code to compute the specified expression which can have
+ /// any type. The result is returned as an RValue struct. If this is an
+  /// aggregate expression, the AggLoc/isAggLocVolatile arguments indicate where
+ /// the result should be returned.
+ ///
+ /// \param IgnoreResult - True if the resulting value isn't used.
+ RValue EmitAnyExpr(const Expr *E, llvm::Value *AggLoc = 0,
+ bool isAggLocVolatile = false, bool IgnoreResult = false);
+
+ // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
+ // or the value of the expression, depending on how va_list is defined.
+ llvm::Value *EmitVAListRef(const Expr *E);
+
+  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
+ /// always be accessible even if no aggregate location is provided.
+ RValue EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc = 0,
+ bool isAggLocVolatile = false);
+
+  /// EmitAggregateCopy - Emit an aggregate copy.
+ ///
+ /// \param isVolatile - True iff either the source or the destination is
+ /// volatile.
+ void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType EltTy, bool isVolatile=false);
+
+ void EmitAggregateClear(llvm::Value *DestPtr, QualType Ty);
+
+  /// StartBlock - Start a new block named N. If the insert block is a dummy
+  /// block then reuse it.
+ void StartBlock(const char *N);
+
+ /// getCGRecordLayout - Return record layout info.
+ const CGRecordLayout *getCGRecordLayout(CodeGenTypes &CGT, QualType RTy);
+
+ /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
+ llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD);
+
+ /// GetAddrOfLocalVar - Return the address of a local variable.
+ llvm::Value *GetAddrOfLocalVar(const VarDecl *VD);
+
+ /// getAccessedFieldNo - Given an encoded value and a result number, return
+ /// the input field number being accessed.
+ static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+
+ unsigned GetIDForAddrOfLabel(const LabelStmt *L);
+
+ /// EmitMemSetToZero - Generate code to memset a value of the given type to 0.
+ void EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty);
+
+ // EmitVAArg - Generate code to get an argument from the passed in pointer
+ // and update it accordingly. The return value is a pointer to the argument.
+ // FIXME: We should be able to get rid of this method and use the va_arg
+ // instruction in LLVM instead once it works well enough.
+ llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
+
+ // EmitVLASize - Generate code for any VLA size expressions that might occur
+ // in a variably modified type. If Ty is a VLA, will return the value that
+ // corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
+ llvm::Value *EmitVLASize(QualType Ty);
+
+ // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
+ // of a variable length array type.
+ llvm::Value *GetVLASize(const VariableArrayType *);
+
+ /// LoadCXXThis - Load the value of 'this'. This function is only valid while
+  /// generating code for a C++ member function.
+ llvm::Value *LoadCXXThis();
+
+ void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::Value *This);
+
+ void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
+
+ llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Declaration Emission
+ //===--------------------------------------------------------------------===//
+
+ void EmitDecl(const Decl &D);
+ void EmitBlockVarDecl(const VarDecl &D);
+ void EmitLocalBlockVarDecl(const VarDecl &D);
+ void EmitStaticBlockVarDecl(const VarDecl &D);
+
+ /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
+ void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
+
+ //===--------------------------------------------------------------------===//
+ // Statement Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
+ void EmitStopPoint(const Stmt *S);
+
+ /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
+ /// this function even if there is no current insertion point.
+ ///
+ /// This function may clear the current insertion point; callers should use
+ /// EnsureInsertPoint if they wish to subsequently generate code without first
+ /// calling EmitBlock, EmitBranch, or EmitStmt.
+ void EmitStmt(const Stmt *S);
+
+ /// EmitSimpleStmt - Try to emit a "simple" statement which does not
+ /// necessarily require an insertion point or debug information; typically
+ /// because the statement amounts to a jump or a container of other
+ /// statements.
+ ///
+ /// \return True if the statement was handled.
+ bool EmitSimpleStmt(const Stmt *S);
+
+ RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ llvm::Value *AggLoc = 0, bool isAggVol = false);
+
+ /// EmitLabel - Emit the block for the given label. It is legal to call this
+ /// function even if there is no current insertion point.
+ void EmitLabel(const LabelStmt &S); // helper for EmitLabelStmt.
+
+ void EmitLabelStmt(const LabelStmt &S);
+ void EmitGotoStmt(const GotoStmt &S);
+ void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
+ void EmitIfStmt(const IfStmt &S);
+ void EmitWhileStmt(const WhileStmt &S);
+ void EmitDoStmt(const DoStmt &S);
+ void EmitForStmt(const ForStmt &S);
+ void EmitReturnStmt(const ReturnStmt &S);
+ void EmitDeclStmt(const DeclStmt &S);
+ void EmitBreakStmt(const BreakStmt &S);
+ void EmitContinueStmt(const ContinueStmt &S);
+ void EmitSwitchStmt(const SwitchStmt &S);
+ void EmitDefaultStmt(const DefaultStmt &S);
+ void EmitCaseStmt(const CaseStmt &S);
+ void EmitCaseStmtRange(const CaseStmt &S);
+ void EmitAsmStmt(const AsmStmt &S);
+
+ void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
+ void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
+ void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
+ void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+
+ //===--------------------------------------------------------------------===//
+ // LValue Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
+ RValue GetUndefRValue(QualType Ty);
+
+ /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
+ /// and issue an ErrorUnsupported style diagnostic (using the
+ /// provided Name).
+ RValue EmitUnsupportedRValue(const Expr *E,
+ const char *Name);
+
+ /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
+ /// an ErrorUnsupported style diagnostic (using the provided Name).
+ LValue EmitUnsupportedLValue(const Expr *E,
+ const char *Name);
+
+ /// EmitLValue - Emit code to compute a designator that specifies the location
+ /// of the expression.
+ ///
+ /// This can return one of two things: a simple address or a bitfield
+ /// reference. In either case, the LLVM Value* in the LValue structure is
+ /// guaranteed to be an LLVM pointer type.
+ ///
+ /// If this returns a bitfield reference, nothing about the pointee type of
+ /// the LLVM value is known: For example, it may not be a pointer to an
+ /// integer.
+ ///
+ /// If this returns a normal address, and if the lvalue's C type is fixed
+ /// size, this method guarantees that the returned pointer type will point to
+ /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
+ /// variable length type, this is not possible.
+ ///
+ LValue EmitLValue(const Expr *E);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ QualType Ty);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, QualType Ty);
+
+ /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+ /// this method emits the address of the lvalue, then loads the result as an
+ /// rvalue, returning the rvalue.
+ RValue EmitLoadOfLValue(LValue V, QualType LVType);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
+ RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfPropertyRefLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfKVCRefLValue(LValue LV, QualType ExprType);
+
+
+ /// EmitStoreThroughLValue - Store the specified rvalue into the specified
+  /// lvalue, where both are guaranteed to have the same type, and that type
+ /// is 'Ty'.
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
+ QualType Ty);
+ void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughKVCRefLValue(RValue Src, LValue Dst, QualType Ty);
+
+  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
+ /// EmitStoreThroughLValue.
+ ///
+ /// \param Result [out] - If non-null, this will be set to a Value* for the
+ /// bit-field contents after the store, appropriate for use as the result of
+ /// an assignment to the bit-field.
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
+ llvm::Value **Result=0);
+
+  // Note: only available for agg return types
+ LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+ // Note: only available for agg return types
+ LValue EmitCallExprLValue(const CallExpr *E);
+ // Note: only available for agg return types
+ LValue EmitVAArgExprLValue(const VAArgExpr *E);
+ LValue EmitDeclRefLValue(const DeclRefExpr *E);
+ LValue EmitStringLiteralLValue(const StringLiteral *E);
+ LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
+ LValue EmitPredefinedFunctionName(unsigned Type);
+ LValue EmitPredefinedLValue(const PredefinedExpr *E);
+ LValue EmitUnaryOpLValue(const UnaryOperator *E);
+ LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
+ LValue EmitMemberExpr(const MemberExpr *E);
+ LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitConditionalOperator(const ConditionalOperator *E);
+ LValue EmitCastLValue(const CastExpr *E);
+
+ llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ LValue EmitLValueForField(llvm::Value* Base, FieldDecl* Field,
+ bool isUnion, unsigned CVRQualifiers);
+ LValue EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value* Base, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForBitfield(llvm::Value* Base, FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
+
+ LValue EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E);
+ LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
+ LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
+
+ LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
+ LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
+ LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
+ LValue EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E);
+ LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
+ LValue EmitStmtExprLValue(const StmtExpr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Scalar Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitCall - Generate a call of the given function, expecting the given
+ /// result type, and using the given argument list which specifies both the
+ /// LLVM arguments and the types they were derived from.
+ ///
+ /// \param TargetDecl - If given, the decl of the function in a
+ /// direct call; used to set attributes on the call (noreturn,
+ /// etc.).
+ RValue EmitCall(const CGFunctionInfo &FnInfo,
+ llvm::Value *Callee,
+ const CallArgList &Args,
+ const Decl *TargetDecl = 0);
+
+ RValue EmitCall(llvm::Value *Callee, QualType FnType,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl = 0);
+ RValue EmitCallExpr(const CallExpr *E);
+
+ RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+ RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E);
+
+ RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD);
+
+ RValue EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E);
+
+ RValue EmitBlockCallExpr(const CallExpr *E);
+
+ /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
+ /// is unhandled by the current target.
+ llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitShuffleVector(llvm::Value* V1, llvm::Value *V2, ...);
+ llvm::Value *EmitVector(llvm::Value * const *Vals, unsigned NumVals,
+ bool isSplat = false);
+
+ llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
+ llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
+ RValue EmitObjCMessageExpr(const ObjCMessageExpr *E);
+ RValue EmitObjCPropertyGet(const Expr *E);
+ RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S);
+ void EmitObjCPropertySet(const Expr *E, RValue Src);
+ void EmitObjCSuperPropertySet(const Expr *E, const Selector &S, RValue Src);
+
+
+ /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
+ /// expression. Will emit a temporary variable if E is not an LValue.
+ RValue EmitReferenceBindingToExpr(const Expr* E, QualType DestType);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ // Expressions are broken into three classes: scalar, complex, aggregate.
+
+ /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
+ /// scalar type, returning the result.
+  llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
+ QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
+ QualType DstTy);
+
+ /// EmitAggExpr - Emit the computation of the specified expression of
+ /// aggregate type. The result is computed into DestPtr. Note that if
+ /// DestPtr is null, the value of the aggregate expression is not needed.
+ void EmitAggExpr(const Expr *E, llvm::Value *DestPtr, bool VolatileDest,
+ bool IgnoreResult = false);
+
+ /// EmitComplexExpr - Emit the computation of the specified expression of
+ /// complex type, returning the result.
+ ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
+ bool IgnoreImag = false,
+ bool IgnoreRealAssign = false,
+ bool IgnoreImagAssign = false);
+
+ /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+ /// of complex type, storing into the specified Value*.
+ void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+
+ /// StoreComplexToAddr - Store a complex number into the specified address.
+ void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+ /// LoadComplexFromAddr - Load a complex number from the specified address.
+ ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
+
+ /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global
+ /// for a static block var decl.
+  llvm::GlobalVariable *CreateStaticBlockVarDecl(const VarDecl &D,
+                                                 const char *Separator,
+                                                 llvm::GlobalValue::LinkageTypes
+                                                 Linkage);
+
+  /// GenerateStaticCXXBlockVarDeclInit - Create the initializer for a C++
+  /// static block var decl that requires runtime initialization.
+ void GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+ void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
+
+ RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+ llvm::Value *AggLoc = 0,
+ bool isAggLocVolatile = false);
+
+ //===--------------------------------------------------------------------===//
+ // Internal Helpers
+ //===--------------------------------------------------------------------===//
+
+  /// ContainsLabel - Return true if the statement contains a label in it. If
+  /// this statement is not executed normally, and it contains no label, then
+  /// we can safely remove the code.
+ static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return 0. If it
+  /// constant folds to 'true' and does not contain a label, return 1; if it
+ /// constant folds to 'false' and does not contain a label, return -1.
+ int ConstantFoldsToSimpleInteger(const Expr *Cond);
+
+ /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
+ /// if statement) to the specified blocks. Based on the condition, this might
+ /// try to simplify the codegen of the conditional based on the branch.
+ void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock);
+private:
+
+ /// EmitIndirectSwitches - Emit code for all of the switch
+ /// instructions in IndirectSwitches.
+ void EmitIndirectSwitches();
+
+ void EmitReturnOfRValue(RValue RV, QualType Ty);
+
+ /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
+ /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
+ ///
+ /// \param AI - The first function argument of the expansion.
+ /// \return The argument following the last expanded function
+ /// argument.
+ llvm::Function::arg_iterator
+ ExpandTypeFromArgs(QualType Ty, LValue Dst,
+ llvm::Function::arg_iterator AI);
+
+ /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
+ /// Ty, into individual arguments on the provided vector \arg Args. See
+ /// ABIArgInfo::Expand.
+ void ExpandTypeToArgs(QualType Ty, RValue Src,
+ llvm::SmallVector<llvm::Value*, 16> &Args);
+
+ llvm::Value* EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr, std::string &ConstraintStr);
+
+  /// EmitCleanupBlock - Emits a single cleanup block.
+ void EmitCleanupBlock();
+
+  /// AddBranchFixup - Adds a branch instruction to the list of fixups for the
+ /// current cleanup scope.
+ void AddBranchFixup(llvm::BranchInst *BI);
+
+ /// EmitCallArg - Emit a single call argument.
+ RValue EmitCallArg(const Expr *E, QualType ArgType);
+
+ /// EmitCallArgs - Emit call arguments for a function.
+ /// The CallArgTypeInfo parameter is used for iterating over the known
+ /// argument types of the function being called.
+ template<typename T>
+ void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+
+ // First, use the argument types that the type info knows about
+ if (CallArgTypeInfo) {
+ for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
+ E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
+ QualType ArgType = *I;
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ }
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg) {
+ QualType ArgType = Arg->getType();
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+ }
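+
+  // An illustrative sketch: for a call to a hypothetical
+  // "int printf(const char*, ...)", the prototype loop above emits the
+  // format-string argument with the declared parameter type, and the
+  // trailing loop emits the remaining variadic arguments using each
+  // argument expression's own type.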
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
new file mode 100644
index 0000000..b69301e
--- /dev/null
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -0,0 +1,1543 @@
+//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-module state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGCall.h"
+#include "CGObjCRuntime.h"
+#include "Mangle.h"
+#include "clang/Frontend/CompileOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Module.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+CodeGenModule::CodeGenModule(ASTContext &C, const CompileOptions &compileOpts,
+ llvm::Module &M, const llvm::TargetData &TD,
+ Diagnostic &diags)
+ : BlockModule(C, M, TD, Types, *this), Context(C),
+ Features(C.getLangOptions()), CompileOpts(compileOpts), TheModule(M),
+ TheTargetData(TD), Diags(diags), Types(C, M, TD), Runtime(0),
+ MemCpyFn(0), MemMoveFn(0), MemSetFn(0), CFConstantStringClassRef(0) {
+
+ if (!Features.ObjC1)
+ Runtime = 0;
+ else if (!Features.NeXTRuntime)
+ Runtime = CreateGNUObjCRuntime(*this);
+ else if (Features.ObjCNonFragileABI)
+ Runtime = CreateMacNonFragileABIObjCRuntime(*this);
+ else
+ Runtime = CreateMacObjCRuntime(*this);
+
+ // If debug info generation is enabled, create the CGDebugInfo object.
+ DebugInfo = CompileOpts.DebugInfo ? new CGDebugInfo(this) : 0;
+}
+
+CodeGenModule::~CodeGenModule() {
+ delete Runtime;
+ delete DebugInfo;
+}
+
+void CodeGenModule::Release() {
+ EmitDeferred();
+ if (Runtime)
+ if (llvm::Function *ObjCInitFunction = Runtime->ModuleInitFunction())
+ AddGlobalCtor(ObjCInitFunction);
+ EmitCtorList(GlobalCtors, "llvm.global_ctors");
+ EmitCtorList(GlobalDtors, "llvm.global_dtors");
+ EmitAnnotations();
+ EmitLLVMUsed();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
+ << Msg << S->getSourceRange();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified decl yet.
+void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
+}
+
+LangOptions::VisibilityMode
+CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (VD->getStorageClass() == VarDecl::PrivateExtern)
+ return LangOptions::Hidden;
+
+ if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) {
+ switch (attr->getVisibility()) {
+ default: assert(0 && "Unknown visibility!");
+ case VisibilityAttr::DefaultVisibility:
+ return LangOptions::Default;
+ case VisibilityAttr::HiddenVisibility:
+ return LangOptions::Hidden;
+ case VisibilityAttr::ProtectedVisibility:
+ return LangOptions::Protected;
+ }
+ }
+
+ return getLangOptions().getVisibilityMode();
+}
+
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+ const Decl *D) const {
+ // Internal definitions always have default visibility.
+ if (GV->hasLocalLinkage()) {
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ return;
+ }
+
+ switch (getDeclVisibilityMode(D)) {
+ default: assert(0 && "Unknown visibility!");
+ case LangOptions::Default:
+ return GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ case LangOptions::Hidden:
+ return GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ case LangOptions::Protected:
+ return GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ }
+}
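+
+// An illustrative sketch: for a hypothetical declaration
+//   __attribute__((visibility("hidden"))) int g;
+// getDeclVisibilityMode returns LangOptions::Hidden, so the emitted global
+// receives llvm::GlobalValue::HiddenVisibility here.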
+
+const char *CodeGenModule::getMangledName(const GlobalDecl &GD) {
+ const NamedDecl *ND = GD.getDecl();
+
+ if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
+ return getMangledCXXCtorName(D, GD.getCtorType());
+ if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
+ return getMangledCXXDtorName(D, GD.getDtorType());
+
+ return getMangledName(ND);
+}
+
+/// \brief Retrieves the mangled name for the given declaration.
+///
+/// If the given declaration requires a mangled name, returns a
+/// const char* containing the mangled name. Otherwise, returns
+/// the unmangled name.
+///
+const char *CodeGenModule::getMangledName(const NamedDecl *ND) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getLangOptions().CPlusPlus && !ND->hasAttrs()) {
+ assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
+ return ND->getNameAsCString();
+ }
+
+ llvm::SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ if (!mangleName(ND, Context, Out)) {
+ assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
+ return ND->getNameAsCString();
+ }
+
+ Name += '\0';
+ return UniqueMangledName(Name.begin(), Name.end());
+}
+
+const char *CodeGenModule::UniqueMangledName(const char *NameStart,
+ const char *NameEnd) {
+ assert(*(NameEnd - 1) == '\0' && "Mangled name must be null terminated!");
+
+ return MangledNames.GetOrCreateValue(NameStart, NameEnd).getKeyData();
+}
+
+/// AddGlobalCtor - Add a function to the list that will be called before
+/// main() runs.
+void CodeGenModule::AddGlobalCtor(llvm::Function * Ctor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalCtors.push_back(std::make_pair(Ctor, Priority));
+}
+
+/// AddGlobalDtor - Add a function to the list that will be called
+/// when the module is unloaded.
+void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalDtors.push_back(std::make_pair(Dtor, Priority));
+}
+
+void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+ // Ctor function type is void()*.
+ llvm::FunctionType* CtorFTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy,
+ std::vector<const llvm::Type*>(),
+ false);
+ llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+
+ // Get the type of a ctor entry, { i32, void ()* }.
+ llvm::StructType* CtorStructTy =
+ llvm::StructType::get(llvm::Type::Int32Ty,
+ llvm::PointerType::getUnqual(CtorFTy), NULL);
+
+ // Construct the constructor and destructor arrays.
+ std::vector<llvm::Constant*> Ctors;
+ for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+ std::vector<llvm::Constant*> S;
+ S.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, I->second, false));
+ S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
+ Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+ }
+
+ if (!Ctors.empty()) {
+ llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
+ new llvm::GlobalVariable(AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Ctors),
+ GlobalName,
+ &TheModule);
+ }
+}
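+
+// An illustrative sketch of the result: with a single constructor at the
+// default priority of 65535, the emitted array looks roughly like
+//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
+//     [{ i32, void ()* } { i32 65535, void ()* @init }]
+// where @init stands in for the registered function.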
+
+void CodeGenModule::EmitAnnotations() {
+ if (Annotations.empty())
+ return;
+
+ // Create a new global variable for the ConstantStruct in the Module.
+ llvm::Constant *Array =
+ llvm::ConstantArray::get(llvm::ArrayType::get(Annotations[0]->getType(),
+ Annotations.size()),
+ Annotations);
+ llvm::GlobalValue *gv =
+ new llvm::GlobalVariable(Array->getType(), false,
+ llvm::GlobalValue::AppendingLinkage, Array,
+ "llvm.global.annotations", &TheModule);
+ gv->setSection("llvm.metadata");
+}
+
+static CodeGenModule::GVALinkage
+GetLinkageForFunction(const FunctionDecl *FD, const LangOptions &Features) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ // C++ member functions defined inside the class are always inline.
+ if (MD->isInline() || !MD->isOutOfLineDefinition())
+ return CodeGenModule::GVA_CXXInline;
+
+ return CodeGenModule::GVA_StrongExternal;
+ }
+
+ // "static" functions get internal linkage.
+ if (FD->getStorageClass() == FunctionDecl::Static)
+ return CodeGenModule::GVA_Internal;
+
+ if (!FD->isInline())
+ return CodeGenModule::GVA_StrongExternal;
+
+  // If the inline function explicitly has the GNU inline attribute on it, or
+  // if this is C89 mode, we use the GNU semantics.
+ if (!Features.C99 && !Features.CPlusPlus) {
+ // extern inline in GNU mode is like C99 inline.
+ if (FD->getStorageClass() == FunctionDecl::Extern)
+ return CodeGenModule::GVA_C99Inline;
+ // Normal inline is a strong symbol.
+ return CodeGenModule::GVA_StrongExternal;
+ } else if (FD->hasActiveGNUInlineAttribute()) {
+ // GCC in C99 mode seems to use a different decision-making
+ // process for extern inline, which factors in previous
+ // declarations.
+ if (FD->isExternGNUInline())
+ return CodeGenModule::GVA_C99Inline;
+ // Normal inline is a strong symbol.
+ return CodeGenModule::GVA_StrongExternal;
+ }
+
+ // The definition of inline changes based on the language. Note that we
+ // have already handled "static inline" above, with the GVA_Internal case.
+ if (Features.CPlusPlus) // inline and extern inline.
+ return CodeGenModule::GVA_CXXInline;
+
+ assert(Features.C99 && "Must be in C99 mode if not in C89 or C++ mode");
+ if (FD->isC99InlineDefinition())
+ return CodeGenModule::GVA_C99Inline;
+
+ return CodeGenModule::GVA_StrongExternal;
+}
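+
+// A summary sketch of the mapping above (the code is authoritative):
+//   static T f() { ... }                  -> GVA_Internal
+//   inline T f() { ... } (C99)            -> GVA_C99Inline
+//   extern inline T f() { ... } (GNU89)   -> GVA_C99Inline
+//   inline T f() { ... } (C++)            -> GVA_CXXInline
+//   T f() { ... }                         -> GVA_StrongExternal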
+
+/// SetFunctionDefinitionAttributes - Set attributes for a global.
+///
+/// FIXME: This is currently only done for aliases and functions, but not for
+/// variables (these details are set in EmitGlobalVarDefinition for variables).
+void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV) {
+ GVALinkage Linkage = GetLinkageForFunction(D, Features);
+
+ if (Linkage == GVA_Internal) {
+ GV->setLinkage(llvm::Function::InternalLinkage);
+ } else if (D->hasAttr<DLLExportAttr>()) {
+ GV->setLinkage(llvm::Function::DLLExportLinkage);
+ } else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>()) {
+ GV->setLinkage(llvm::Function::WeakAnyLinkage);
+ } else if (Linkage == GVA_C99Inline) {
+ // In C99 mode, 'inline' functions are guaranteed to have a strong
+ // definition somewhere else, so we can use available_externally linkage.
+ GV->setLinkage(llvm::Function::AvailableExternallyLinkage);
+ } else if (Linkage == GVA_CXXInline) {
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ GV->setLinkage(llvm::Function::LinkOnceODRLinkage);
+ } else {
+ assert(Linkage == GVA_StrongExternal);
+ // Otherwise, we have strong external linkage.
+ GV->setLinkage(llvm::Function::ExternalLinkage);
+ }
+
+ SetCommonAttributes(D, GV);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F) {
+ AttributeListType AttributeList;
+ ConstructAttributeList(Info, D, AttributeList);
+
+ F->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.size()));
+
+ // Set the appropriate calling convention for the Function.
+ if (D->hasAttr<FastCallAttr>())
+ F->setCallingConv(llvm::CallingConv::X86_FastCall);
+
+ if (D->hasAttr<StdCallAttr>())
+ F->setCallingConv(llvm::CallingConv::X86_StdCall);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
+ llvm::Function *F) {
+ if (!Features.Exceptions && !Features.ObjCNonFragileABI)
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ if (D->hasAttr<AlwaysInlineAttr>())
+ F->addFnAttr(llvm::Attribute::AlwaysInline);
+
+ if (D->hasAttr<NoinlineAttr>())
+ F->addFnAttr(llvm::Attribute::NoInline);
+}
+
+void CodeGenModule::SetCommonAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ setGlobalVisibility(GV, D);
+
+ if (D->hasAttr<UsedAttr>())
+ AddUsedGlobal(GV);
+
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+}
+
+void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+ llvm::Function *F,
+ const CGFunctionInfo &FI) {
+ SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributesForDefinition(D, F);
+
+ F->setLinkage(llvm::Function::InternalLinkage);
+
+ SetCommonAttributes(D, F);
+}
+
+void CodeGenModule::SetFunctionAttributes(const FunctionDecl *FD,
+ llvm::Function *F,
+ bool IsIncompleteFunction) {
+ if (!IsIncompleteFunction)
+ SetLLVMFunctionAttributes(FD, getTypes().getFunctionInfo(FD), F);
+
+ // Only a few attributes are set on declarations; these may later be
+ // overridden by a definition.
+
+ if (FD->hasAttr<DLLImportAttr>()) {
+ F->setLinkage(llvm::Function::DLLImportLinkage);
+ } else if (FD->hasAttr<WeakAttr>() || FD->hasAttr<WeakImportAttr>()) {
+ // "extern_weak" is overloaded in LLVM; we probably should have
+ // separate linkage types for this.
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ } else {
+ F->setLinkage(llvm::Function::ExternalLinkage);
+ }
+
+ if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
+ F->setSection(SA->getName());
+}
+
+void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
+ assert(!GV->isDeclaration() &&
+ "Only globals with definition can force usage.");
+ LLVMUsed.push_back(GV);
+}
+
+void CodeGenModule::EmitLLVMUsed() {
+ // Don't create llvm.used if there is no need.
+ if (LLVMUsed.empty())
+ return;
+
+ llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, LLVMUsed.size());
+
+ // Convert LLVMUsed to what ConstantArray needs.
+ std::vector<llvm::Constant*> UsedArray;
+ UsedArray.resize(LLVMUsed.size());
+ for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
+ UsedArray[i] =
+ llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]), i8PTy);
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(ATy, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray),
+ "llvm.used", &getModule());
+
+ GV->setSection("llvm.metadata");
+}
+
+void CodeGenModule::EmitDeferred() {
+ // Emit code for any potentially referenced deferred decls. Since a
+ // previously unused static decl may become used during the generation of code
+ // for a static function, iterate until no changes are made.
+ while (!DeferredDeclsToEmit.empty()) {
+ GlobalDecl D = DeferredDeclsToEmit.back();
+ DeferredDeclsToEmit.pop_back();
+
+    // The mangled name for the decl must already be in GlobalDeclMap.
+ // Look it up to see if it was defined with a stronger definition (e.g. an
+ // extern inline function with a strong function redefinition). If so,
+ // just ignore the deferred decl.
+ llvm::GlobalValue *CGRef = GlobalDeclMap[getMangledName(D)];
+ assert(CGRef && "Deferred decl wasn't referenced?");
+
+ if (!CGRef->isDeclaration())
+ continue;
+
+ // Otherwise, emit the definition and move on to the next one.
+ EmitGlobalDefinition(D);
+ }
+}
+
+/// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+/// annotation information for a given GlobalValue. The annotation struct is
+/// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+/// GlobalValue being annotated. The second field is the constant string
+/// created from the AnnotateAttr's annotation. The third field is a constant
+/// string containing the name of the translation unit. The fourth field is
+/// the line number in the file of the annotated value declaration.
+///
+/// FIXME: this does not unique the annotation string constants, as llvm-gcc
+/// appears to.
+///
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ unsigned LineNo) {
+ llvm::Module *M = &getModule();
+
+ // get [N x i8] constants for the annotation string, and the filename string
+ // which are the 2nd and 3rd elements of the global annotation structure.
+ const llvm::Type *SBP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ llvm::Constant *anno = llvm::ConstantArray::get(AA->getAnnotation(), true);
+ llvm::Constant *unit = llvm::ConstantArray::get(M->getModuleIdentifier(),
+ true);
+
+ // Get the two global values corresponding to the ConstantArrays we just
+ // created to hold the bytes of the strings.
+ const char *StringPrefix = getContext().Target.getStringSymbolPrefix(true);
+ llvm::GlobalValue *annoGV =
+ new llvm::GlobalVariable(anno->getType(), false,
+ llvm::GlobalValue::InternalLinkage, anno,
+ GV->getName() + StringPrefix, M);
+ // translation unit name string, emitted into the llvm.metadata section.
+ llvm::GlobalValue *unitGV =
+ new llvm::GlobalVariable(unit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, unit,
+ StringPrefix, M);
+
+ // Create the ConstantStruct for the global annotation.
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantExpr::getBitCast(GV, SBP),
+ llvm::ConstantExpr::getBitCast(annoGV, SBP),
+ llvm::ConstantExpr::getBitCast(unitGV, SBP),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, LineNo)
+ };
+ return llvm::ConstantStruct::get(Fields, 4, false);
+}
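+
+// An illustrative sketch: a hypothetical
+//   int g __attribute__((annotate("mydata")));
+// produces a {i8*, i8*, i8*, i32} entry referencing @g, the "mydata"
+// string, the module identifier, and the line number of g's declaration.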
+
+bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
+ // Never defer when EmitAllDecls is specified or the decl has
+ // attribute used.
+ if (Features.EmitAllDecls || Global->hasAttr<UsedAttr>())
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Constructors and destructors should never be deferred.
+ if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
+ return false;
+
+ GVALinkage Linkage = GetLinkageForFunction(FD, Features);
+
+ // static, static inline, always_inline, and extern inline functions can
+ // always be deferred. Normal inline functions can be deferred in C99/C++.
+ if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
+ Linkage == GVA_CXXInline)
+ return true;
+ return false;
+ }
+
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Invalid decl");
+
+ return VD->getStorageClass() == VarDecl::Static;
+}
+
+void CodeGenModule::EmitGlobal(GlobalDecl GD) {
+ const ValueDecl *Global = GD.getDecl();
+
+ // If this is an alias definition (which otherwise looks like a declaration)
+ // emit it now.
+ if (Global->hasAttr<AliasAttr>())
+ return EmitAliasDefinition(Global);
+
+ // Ignore declarations, they will be emitted on their first use.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Forward declarations are emitted lazily on first use.
+ if (!FD->isThisDeclarationADefinition())
+ return;
+ } else {
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+
+ // In C++, if this is marked "extern", defer code generation.
+ if (getLangOptions().CPlusPlus && !VD->getInit() &&
+ (VD->getStorageClass() == VarDecl::Extern ||
+ VD->isExternC(getContext())))
+ return;
+
+ // In C, if this isn't a definition, defer code generation.
+ if (!getLangOptions().CPlusPlus && !VD->getInit())
+ return;
+ }
+
+ // Defer code generation when possible if this is a static definition, inline
+ // function etc. These we only want to emit if they are used.
+ if (MayDeferGeneration(Global)) {
+ // If the value has already been used, add it directly to the
+ // DeferredDeclsToEmit list.
+ const char *MangledName = getMangledName(GD);
+ if (GlobalDeclMap.count(MangledName))
+ DeferredDeclsToEmit.push_back(GD);
+ else {
+ // Otherwise, remember that we saw a deferred decl with this name. The
+ // first use of the mangled name will cause it to move into
+ // DeferredDeclsToEmit.
+ DeferredDecls[MangledName] = GD;
+ }
+ return;
+ }
+
+ // Otherwise emit the definition.
+ EmitGlobalDefinition(GD);
+}
+
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
+ const ValueDecl *D = GD.getDecl();
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ EmitCXXConstructor(CD, GD.getCtorType());
+ else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+ EmitCXXDestructor(DD, GD.getDtorType());
+ else if (isa<FunctionDecl>(D))
+ EmitGlobalFunctionDefinition(GD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ EmitGlobalVarDefinition(VD);
+ else {
+ assert(0 && "Invalid argument to EmitGlobalDefinition()");
+ }
+}
+
+/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
+/// module, create and return an llvm Function with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the function when it is first created.
+llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(const char *MangledName,
+ const llvm::Type *Ty,
+ GlobalDecl D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
+ if (Entry) {
+ if (Entry->getType()->getElementType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(Entry, PTy);
+ }
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
+ DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ } else if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl())) {
+    // If this is the first reference to a C++ inline function in a class,
+    // queue up the deferred function body for emission. These are not seen
+    // as top-level declarations.
+ if (FD->isThisDeclarationADefinition() && MayDeferGeneration(FD))
+ DeferredDeclsToEmit.push_back(D);
+ }
+
+  // If this function doesn't have a complete type (for example, the return
+  // type is an incomplete struct), use a fake type instead, and don't try
+  // to set attributes on it.
+ bool IsIncompleteFunction = false;
+ if (!isa<llvm::FunctionType>(Ty)) {
+ Ty = llvm::FunctionType::get(llvm::Type::VoidTy,
+ std::vector<const llvm::Type*>(), false);
+ IsIncompleteFunction = true;
+ }
+ llvm::Function *F = llvm::Function::Create(cast<llvm::FunctionType>(Ty),
+ llvm::Function::ExternalLinkage,
+ "", &getModule());
+ F->setName(MangledName);
+ if (D.getDecl())
+ SetFunctionAttributes(cast<FunctionDecl>(D.getDecl()), F,
+ IsIncompleteFunction);
+ Entry = F;
+ return F;
+}
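+
+// An illustrative sketch: if "void f();" has already been referenced and a
+// later lookup asks for f with a different function type, the existing
+// declaration is returned bitcast to the requested pointer type instead of
+// creating a second function named f.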
+
+/// GetAddrOfFunction - Return the address of the given function. If Ty is
+/// non-null, then this function will use the specified type if it has to
+/// create it (this occurs when we see a definition of the function).
+llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
+ const llvm::Type *Ty) {
+ // If there was no specific requested type, just convert it now.
+ if (!Ty)
+ Ty = getTypes().ConvertType(GD.getDecl()->getType());
+ return GetOrCreateLLVMFunction(getMangledName(GD.getDecl()), Ty, GD);
+}
+
+/// CreateRuntimeFunction - Create a new runtime function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
+ const char *Name) {
+ // Convert Name to be a uniqued string from the IdentifierInfo table.
+ Name = getContext().Idents.get(Name).getName();
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl());
+}
+
+/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
+/// create and return an llvm GlobalVariable with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the global when it is first created.
+llvm::Constant *CodeGenModule::GetOrCreateLLVMGlobal(const char *MangledName,
+ const llvm::PointerType*Ty,
+ const VarDecl *D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
+ if (Entry) {
+ if (Entry->getType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ return llvm::ConstantExpr::getBitCast(Entry, Ty);
+ }
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
+ DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Ty->getElementType(), false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "", &getModule(),
+ false, Ty->getAddressSpace());
+ GV->setName(MangledName);
+
+ // Handle things which are present even on external declarations.
+ if (D) {
+ // FIXME: This code is overly simple and should be merged with other global
+ // handling.
+ GV->setConstant(D->getType().isConstant(Context));
+
+ // FIXME: Merge with other attribute handling code.
+ if (D->getStorageClass() == VarDecl::PrivateExtern)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ GV->setThreadLocal(D->isThreadSpecified());
+ }
+
+ return Entry = GV;
+}
+
+/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+/// given global variable. If Ty is non-null and if the global doesn't exist,
+/// then it will be greated with the specified type instead of whatever the
+/// normal requested type would be.
+llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
+ const llvm::Type *Ty) {
+ assert(D->hasGlobalStorage() && "Not a global variable");
+ QualType ASTTy = D->getType();
+ if (Ty == 0)
+ Ty = getTypes().ConvertTypeForMem(ASTTy);
+
+ const llvm::PointerType *PTy =
+ llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
+ return GetOrCreateLLVMGlobal(getMangledName(D), PTy, D);
+}
+
+/// CreateRuntimeVariable - Create a new runtime global variable with the
+/// specified type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
+ const char *Name) {
+ // Convert Name to be a uniqued string from the IdentifierInfo table.
+ Name = getContext().Idents.get(Name).getName();
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0);
+}
+
+void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
+ assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+ if (MayDeferGeneration(D)) {
+ // If we have not seen a reference to this variable yet, place it
+ // into the deferred declarations table to be emitted if needed
+ // later.
+ const char *MangledName = getMangledName(D);
+ if (GlobalDeclMap.count(MangledName) == 0) {
+ DeferredDecls[MangledName] = GlobalDecl(D);
+ return;
+ }
+ }
+
+ // The tentative definition is the only definition.
+ EmitGlobalVarDefinition(D);
+}
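+
+// An illustrative sketch: in C, a file-scope "int x;" is a tentative
+// definition. If no "int x = ...;" follows in the translation unit, this
+// path emits x as a zero-initialized (typically common-linkage) global.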
+
+void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
+ llvm::Constant *Init = 0;
+ QualType ASTTy = D->getType();
+
+ if (D->getInit() == 0) {
+ // This is a tentative definition; tentative definitions are
+ // implicitly initialized with { 0 }.
+ //
+ // Note that tentative definitions are only emitted at the end of
+ // a translation unit, so they should never have incomplete
+ // type. In addition, EmitTentativeDefinition makes sure that we
+ // never attempt to emit a tentative definition if a real one
+  // exists. A use may still exist, however, so we may still need
+ // to do a RAUW.
+ assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
+ Init = llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(ASTTy));
+ } else {
+ Init = EmitConstantExpr(D->getInit(), D->getType());
+ if (!Init) {
+ ErrorUnsupported(D, "static initializer");
+ QualType T = D->getInit()->getType();
+ Init = llvm::UndefValue::get(getTypes().ConvertType(T));
+ }
+ }
+
+ const llvm::Type* InitType = Init->getType();
+ llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ // Entry is now either a Function or GlobalVariable.
+ llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Entry);
+
+ // We have a definition after a declaration with the wrong type.
+ // We must make a new GlobalVariable* and update everything that used OldGV
+ // (a declaration or tentative definition) with the new GlobalVariable*
+ // (which will be a definition).
+ //
+ // This happens if there is a prototype for a global (e.g.
+ // "extern int x[];") and then a definition of a different type (e.g.
+ // "int x[10];"). This also happens when an initializer has a different type
+ // from the type of the global (this happens with unions).
+ if (GV == 0 ||
+ GV->getType()->getElementType() != InitType ||
+ GV->getType()->getAddressSpace() != ASTTy.getAddressSpace()) {
+
+ // Remove the old entry from GlobalDeclMap so that we'll create a new one.
+ GlobalDeclMap.erase(getMangledName(D));
+
+ // Make a new global with the correct type, this is now guaranteed to work.
+ GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
+ GV->takeName(cast<llvm::GlobalValue>(Entry));
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ cast<llvm::GlobalValue>(Entry)->eraseFromParent();
+ }
+
+ if (const AnnotateAttr *AA = D->getAttr<AnnotateAttr>()) {
+ SourceManager &SM = Context.getSourceManager();
+ AddAnnotation(EmitAnnotateAttr(GV, AA,
+ SM.getInstantiationLineNumber(D->getLocation())));
+ }
+
+ GV->setInitializer(Init);
+ GV->setConstant(D->getType().isConstant(Context));
+ GV->setAlignment(getContext().getDeclAlignInBytes(D));
+
+ // Set the llvm linkage type as appropriate.
+ if (D->getStorageClass() == VarDecl::Static)
+ GV->setLinkage(llvm::Function::InternalLinkage);
+ else if (D->hasAttr<DLLImportAttr>())
+ GV->setLinkage(llvm::Function::DLLImportLinkage);
+ else if (D->hasAttr<DLLExportAttr>())
+ GV->setLinkage(llvm::Function::DLLExportLinkage);
+ else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>())
+ GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ else if (!CompileOpts.NoCommon &&
+ (!D->hasExternalStorage() && !D->getInit()))
+ GV->setLinkage(llvm::GlobalVariable::CommonLinkage);
+ else
+ GV->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+
+ SetCommonAttributes(D, GV);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D->getLocation());
+ DI->EmitGlobalVariable(GV, D);
+ }
+}
+
+/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
+/// implement a function with no prototype, e.g. "int foo() {}". If there are
+/// existing call uses of the old function in the module, this adjusts them to
+/// call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn) {
+ // If we're redefining a global as a function, don't transform it.
+ llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
+ if (OldFn == 0) return;
+
+ const llvm::Type *NewRetTy = NewFn->getReturnType();
+ llvm::SmallVector<llvm::Value*, 4> ArgList;
+
+ for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
+ UI != E; ) {
+ // TODO: Do invokes ever occur in C code? If so, we should handle them too.
+ llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*UI++);
+ if (!CI) continue;
+
+ // If the return types don't match exactly, and if the call isn't dead, then
+ // we can't transform this call.
+ if (CI->getType() != NewRetTy && !CI->use_empty())
+ continue;
+
+ // If the function was passed too few arguments, don't transform. If extra
+ // arguments were passed, we silently drop them. If any of the types
+ // mismatch, we don't transform.
+ unsigned ArgNo = 0;
+ bool DontTransform = false;
+ for (llvm::Function::arg_iterator AI = NewFn->arg_begin(),
+ E = NewFn->arg_end(); AI != E; ++AI, ++ArgNo) {
+ if (CI->getNumOperands()-1 == ArgNo ||
+ CI->getOperand(ArgNo+1)->getType() != AI->getType()) {
+ DontTransform = true;
+ break;
+ }
+ }
+ if (DontTransform)
+ continue;
+
+ // Okay, we can transform this. Create the new call instruction and copy
+ // over the required information.
+ ArgList.append(CI->op_begin()+1, CI->op_begin()+1+ArgNo);
+ llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(),
+ ArgList.end(), "", CI);
+ ArgList.clear();
+ if (NewCall->getType() != llvm::Type::VoidTy)
+ NewCall->takeName(CI);
+ NewCall->setCallingConv(CI->getCallingConv());
+ NewCall->setAttributes(CI->getAttributes());
+
+ // Finally, remove the old call, replacing any uses with the new one.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+ }
+}
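+
+// A sketch of the pattern handled above:
+//   int foo();                   // no-prototype declaration, lowered varargs
+//   int bar() { return foo(); }  // call made through the old declaration
+//   int foo() { return 0; }      // definition; the earlier call is rewritten
+//                                // into a direct call of the new function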
+
+void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
+ const llvm::FunctionType *Ty;
+ const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ bool isVariadic = D->getType()->getAsFunctionProtoType()->isVariadic();
+
+ Ty = getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), isVariadic);
+ } else {
+ Ty = cast<llvm::FunctionType>(getTypes().ConvertType(D->getType()));
+
+ // As a special case, make sure that definitions of K&R function
+ // "type foo()" aren't declared as varargs (which forces the backend
+ // to do unnecessary work).
+ if (D->getType()->isFunctionNoProtoType()) {
+ assert(Ty->isVarArg() && "Didn't lower type as expected");
+ // Due to stret, the lowered function could have arguments.
+ // Just create the same type as was lowered by ConvertType
+ // but strip off the varargs bit.
+ std::vector<const llvm::Type*> Args(Ty->param_begin(), Ty->param_end());
+ Ty = llvm::FunctionType::get(Ty->getReturnType(), Args, false);
+ }
+ }
+
+ // Get or create the prototype for the function.
+ llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != Ty) {
+ llvm::GlobalValue *OldFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+    // F is the Function* for the one with the wrong type; we must make a new
+ // Function* and update everything that used F (a declaration) with the new
+ // Function* (which will be a definition).
+ //
+ // This happens if there is a prototype for a function
+ // (e.g. "int f()") and then a definition of a different type
+ // (e.g. "int f(int x)"). Start by making a new function of the
+ // correct type, RAUW, then steal the name.
+ GlobalDeclMap.erase(getMangledName(D));
+ llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
+ NewFn->takeName(OldFn);
+
+ // If this is an implementation of a function without a prototype, try to
+ // replace any existing uses of the function (which may be calls) with uses
+ // of the new function
+ if (D->getType()->isFunctionNoProtoType()) {
+ ReplaceUsesOfNonProtoTypeWithRealFunction(OldFn, NewFn);
+ OldFn->removeDeadConstantUsers();
+ }
+
+ // Replace uses of F with the Function we will endow with a body.
+ if (!Entry->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Ok, delete the old function now, which is dead.
+ OldFn->eraseFromParent();
+
+ Entry = NewFn;
+ }
+
+ llvm::Function *Fn = cast<llvm::Function>(Entry);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+
+ if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
+ AddGlobalCtor(Fn, CA->getPriority());
+ if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
+ AddGlobalDtor(Fn, DA->getPriority());
+}
+
+void CodeGenModule::EmitAliasDefinition(const ValueDecl *D) {
+ const AliasAttr *AA = D->getAttr<AliasAttr>();
+ assert(AA && "Not an alias?");
+
+ const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+
+ // Unique the name through the identifier table.
+ const char *AliaseeName = AA->getAliasee().c_str();
+ AliaseeName = getContext().Idents.get(AliaseeName).getName();
+
+ // Create a reference to the named value. This ensures that it is emitted
+  // if it is a deferred decl.
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AliaseeName, DeclTy, GlobalDecl());
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AliaseeName,
+ llvm::PointerType::getUnqual(DeclTy), 0);
+
+ // Create the new alias itself, but don't set a name yet.
+ llvm::GlobalValue *GA =
+ new llvm::GlobalAlias(Aliasee->getType(),
+ llvm::Function::ExternalLinkage,
+ "", Aliasee, &getModule());
+
+ // See if there is already something with the alias' name in the module.
+ const char *MangledName = getMangledName(D);
+ llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
+
+ if (Entry && !Entry->isDeclaration()) {
+ // If there is a definition in the module, then it wins over the alias.
+    // This is dubious, but we allow it to be safe. Just ignore the alias.
+ GA->eraseFromParent();
+ return;
+ }
+
+ if (Entry) {
+ // If there is a declaration in the module, then we had an extern followed
+ // by the alias, as in:
+ // extern int test6();
+ // ...
+ // int test6() __attribute__((alias("test7")));
+ //
+ // Remove it and replace uses of it with the alias.
+
+ Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
+ Entry->getType()));
+ Entry->eraseFromParent();
+ }
+
+ // Now we know that there is no conflict, set the name.
+ Entry = GA;
+ GA->setName(MangledName);
+
+ // Set attributes which are particular to an alias; this is a
+ // specialization of the attributes which may be set on a global
+ // variable/function.
+ if (D->hasAttr<DLLExportAttr>()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // The dllexport attribute is ignored for undefined symbols.
+ if (FD->getBody(getContext()))
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ } else {
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ }
+ } else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>()) {
+ GA->setLinkage(llvm::Function::WeakAnyLinkage);
+ }
+
+ SetCommonAttributes(D, GA);
+}
+
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(unsigned BuiltinID) {
+ assert((Context.BuiltinInfo.isLibFunction(BuiltinID) ||
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) &&
+ "isn't a lib fn");
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ const char *Name = Context.BuiltinInfo.GetName(BuiltinID);
+ if (Context.BuiltinInfo.isLibFunction(BuiltinID))
+ Name += 10;
+
+ // Get the type for the builtin.
+ Builtin::Context::GetBuiltinTypeError Error;
+ QualType Type = Context.BuiltinInfo.GetBuiltinType(BuiltinID, Context, Error);
+ assert(Error == Builtin::Context::GE_None && "Can't get builtin type");
+
+ const llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(Type));
+
+ // Unique the name through the identifier table.
+ Name = getContext().Idents.get(Name).getName();
+ // FIXME: param attributes for sext/zext etc.
+ return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl());
+}
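+
+// An illustrative sketch: for __builtin_fabsf, Name initially points at
+// "__builtin_fabsf"; skipping the 10-character "__builtin_" prefix leaves
+// "fabsf", and the returned declaration is the C library's fabsf.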
+
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
+ unsigned NumTys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(),
+ (llvm::Intrinsic::ID)IID, Tys, NumTys);
+}
+
+llvm::Function *CodeGenModule::getMemCpyFn() {
+ if (MemCpyFn) return MemCpyFn;
+ const llvm::Type *IntPtr = TheTargetData.getIntPtrType();
+ return MemCpyFn = getIntrinsic(llvm::Intrinsic::memcpy, &IntPtr, 1);
+}
+
+llvm::Function *CodeGenModule::getMemMoveFn() {
+ if (MemMoveFn) return MemMoveFn;
+ const llvm::Type *IntPtr = TheTargetData.getIntPtrType();
+ return MemMoveFn = getIntrinsic(llvm::Intrinsic::memmove, &IntPtr, 1);
+}
+
+llvm::Function *CodeGenModule::getMemSetFn() {
+ if (MemSetFn) return MemSetFn;
+ const llvm::Type *IntPtr = TheTargetData.getIntPtrType();
+ return MemSetFn = getIntrinsic(llvm::Intrinsic::memset, &IntPtr, 1);
+}
+
+static void appendFieldAndPadding(CodeGenModule &CGM,
+ std::vector<llvm::Constant*>& Fields,
+ FieldDecl *FieldD, FieldDecl *NextFieldD,
+ llvm::Constant* Field,
+ RecordDecl* RD, const llvm::StructType *STy) {
+ // Append the field.
+ Fields.push_back(Field);
+
+ int StructFieldNo = CGM.getTypes().getLLVMFieldNo(FieldD);
+
+ int NextStructFieldNo;
+ if (!NextFieldD) {
+ NextStructFieldNo = STy->getNumElements();
+ } else {
+ NextStructFieldNo = CGM.getTypes().getLLVMFieldNo(NextFieldD);
+ }
+
+  // Append any padding elements between this field and the next.
+  for (int i = StructFieldNo + 1; i < NextStructFieldNo; i++) {
+    llvm::Constant *C =
+      llvm::Constant::getNullValue(STy->getElementType(i));
+
+ Fields.push_back(C);
+ }
+}
+
+llvm::Constant *CodeGenModule::
+GetAddrOfConstantCFString(const StringLiteral *Literal) {
+ std::string str;
+ unsigned StringLength = 0;
+
+ bool isUTF16 = false;
+ if (Literal->containsNonAsciiOrNull()) {
+ // Convert from UTF-8 to UTF-16.
+ llvm::SmallVector<UTF16, 128> ToBuf(Literal->getByteLength());
+ const UTF8 *FromPtr = (UTF8 *)Literal->getStrData();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ ConversionResult Result;
+ Result = ConvertUTF8toUTF16(&FromPtr, FromPtr+Literal->getByteLength(),
+ &ToPtr, ToPtr+Literal->getByteLength(),
+ strictConversion);
+ if (Result == conversionOK) {
+ // FIXME: Storing UTF-16 in a C string is a hack to test Unicode strings
+ // without doing more surgery to this routine. Since we aren't explicitly
+ // checking for endianness here, it's also a bug (when generating code for
+ // a target that doesn't match the host endianness). Modeling this as an
+ // i16 array is likely the cleanest solution.
+ StringLength = ToPtr-&ToBuf[0];
+      str.assign((char *)&ToBuf[0], StringLength*2); // Bytes = 2 * UTF-16 units.
+ isUTF16 = true;
+ } else if (Result == sourceIllegal) {
+ // FIXME: Have Sema::CheckObjCString() validate the UTF-8 string.
+ str.assign(Literal->getStrData(), Literal->getByteLength());
+ StringLength = str.length();
+ } else
+ assert(Result == conversionOK && "UTF-8 to UTF-16 conversion failed");
+
+ } else {
+ str.assign(Literal->getStrData(), Literal->getByteLength());
+ StringLength = str.length();
+ }
+ llvm::StringMapEntry<llvm::Constant *> &Entry =
+ CFConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero = llvm::Constant::getNullValue(llvm::Type::Int32Ty);
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ if (!CFConstantStringClassRef) {
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+
+ // FIXME: This is fairly broken if __CFConstantStringClassReference is
+ // already defined, in that it will get renamed and the user will most
+ // likely see an opaque error message. This is a general issue with relying
+ // on particular names.
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Ty, false,
+ llvm::GlobalVariable::ExternalLinkage, 0,
+ "__CFConstantStringClassReference",
+ &getModule());
+
+ // Decay array -> ptr
+ CFConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ }
+
+ QualType CFTy = getContext().getCFConstantStringType();
+ RecordDecl *CFRD = CFTy->getAsRecordType()->getDecl();
+
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(CFTy));
+
+ std::vector<llvm::Constant*> Fields;
+ RecordDecl::field_iterator Field = CFRD->field_begin(getContext());
+
+ // Class pointer.
+ FieldDecl *CurField = *Field++;
+ FieldDecl *NextField = *Field++;
+ appendFieldAndPadding(*this, Fields, CurField, NextField,
+ CFConstantStringClassRef, CFRD, STy);
+
+ // Flags.
+ CurField = NextField;
+ NextField = *Field++;
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ appendFieldAndPadding(*this, Fields, CurField, NextField,
+ isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0)
+ : llvm::ConstantInt::get(Ty, 0x07C8),
+ CFRD, STy);
+
+ // String pointer.
+ CurField = NextField;
+ NextField = *Field++;
+ llvm::Constant *C = llvm::ConstantArray::get(str);
+
+ const char *Sect, *Prefix;
+ bool isConstant;
+ if (isUTF16) {
+ Prefix = getContext().Target.getUnicodeStringSymbolPrefix();
+ Sect = getContext().Target.getUnicodeStringSection();
+ // FIXME: Why does GCC not set constant here?
+ isConstant = false;
+ } else {
+ Prefix = getContext().Target.getStringSymbolPrefix(true);
+ Sect = getContext().Target.getCFStringDataSection();
+ // FIXME: -fwritable-strings should probably affect this, but we
+ // are following gcc here.
+ isConstant = true;
+ }
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(C->getType(), isConstant,
+ llvm::GlobalValue::InternalLinkage,
+ C, Prefix, &getModule());
+ if (Sect)
+ GV->setSection(Sect);
+ if (isUTF16) {
+ unsigned Align = getContext().getTypeAlign(getContext().ShortTy)/8;
+ GV->setAlignment(Align);
+ }
+ appendFieldAndPadding(*this, Fields, CurField, NextField,
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2),
+ CFRD, STy);
+
+ // String length.
+ CurField = NextField;
+ NextField = 0;
+ Ty = getTypes().ConvertType(getContext().LongTy);
+ appendFieldAndPadding(*this, Fields, CurField, NextField,
+ llvm::ConstantInt::get(Ty, StringLength), CFRD, STy);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(C->getType(), true,
+ llvm::GlobalVariable::InternalLinkage, C,
+ getContext().Target.getCFStringSymbolPrefix(),
+ &getModule());
+ if (const char *Sect = getContext().Target.getCFStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
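+
+// A layout sketch of the constant built above, following the
+// CFConstantString record: { isa = __CFConstantStringClassReference,
+// flags = 0x7C8 (ASCII) or 0x7D0 (UTF-16), bytes = pointer to the string
+// data global, length = the character count }.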
+
+/// GetStringForStringLiteral - Return the appropriate bytes for a
+/// string literal, properly padded to match the literal type.
+std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
+ const char *StrData = E->getStrData();
+ unsigned Len = E->getByteLength();
+
+ const ConstantArrayType *CAT =
+ getContext().getAsConstantArrayType(E->getType());
+ assert(CAT && "String isn't pointer or array!");
+
+ // Resize the string to the right size.
+ std::string Str(StrData, StrData+Len);
+ uint64_t RealLen = CAT->getSize().getZExtValue();
+
+ if (E->isWide())
+ RealLen *= getContext().Target.getWCharWidth()/8;
+
+ Str.resize(RealLen, '\0');
+
+ return Str;
+}
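+
+// An illustrative sketch: for a hypothetical "char s[10] = "hi";" the
+// literal bytes "hi" are resized with trailing '\0's to the 10-byte array
+// size; wide literals are scaled by the target's wchar_t width first.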
+
+/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
+/// constant array for the given string literal.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
+ // FIXME: This can be more efficient.
+ return GetAddrOfConstantString(GetStringForStringLiteral(S));
+}
+
+/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+/// array for the given ObjCEncodeExpr node.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
+ std::string Str;
+ getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+
+ return GetAddrOfConstantCString(Str);
+}
+
+/// GenerateStringLiteral -- Creates storage for a string literal.
+static llvm::Constant *GenerateStringLiteral(const std::string &str,
+ bool constant,
+ CodeGenModule &CGM,
+ const char *GlobalName) {
+ // Create Constant for this string literal. Don't add a '\0'.
+ llvm::Constant *C = llvm::ConstantArray::get(str, false);
+
+ // Create a global variable for this string
+ return new llvm::GlobalVariable(C->getType(), constant,
+ llvm::GlobalValue::InternalLinkage,
+ C, GlobalName, &CGM.getModule());
+}
+
+/// GetAddrOfConstantString - Returns a pointer to a character array
+/// containing the literal. Its contents are exactly those of the
+/// given string, i.e. it will not be null terminated automatically;
+/// see GetAddrOfConstantCString. Note that whether the result is
+/// actually a pointer to an LLVM constant depends on
+/// Features.WritableStrings.
+///
+/// The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(const std::string &str,
+ const char *GlobalName) {
+ bool IsConstant = !Features.WritableStrings;
+
+ // Get the default prefix if a name wasn't specified.
+ if (!GlobalName)
+ GlobalName = getContext().Target.getStringSymbolPrefix(IsConstant);
+
+ // Don't share any string literals if strings aren't constant.
+ if (!IsConstant)
+ return GenerateStringLiteral(str, false, *this, GlobalName);
+
+ llvm::StringMapEntry<llvm::Constant *> &Entry =
+ ConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
+
+ if (Entry.getValue())
+ return Entry.getValue();
+
+ // Create a global variable for this.
+ llvm::Constant *C = GenerateStringLiteral(str, true, *this, GlobalName);
+ Entry.setValue(C);
+ return C;
+}
+
+/// GetAddrOfConstantCString - Returns a pointer to a character
+/// array containing the literal and a terminating '\0'
+/// character. The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName){
+ return GetAddrOfConstantString(str + '\0', GlobalName);
+}
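+
+// Usage sketch (illustrative): for str == "abc",
+//   GetAddrOfConstantString(str)  -> global holding exactly "abc" (3 bytes)
+//   GetAddrOfConstantCString(str) -> global holding "abc\0"       (4 bytes)
+// Only the constant case is uniqued through ConstantStringMap; under
+// -fwritable-strings every call creates a fresh, non-constant global.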
+
+/// EmitObjCPropertyImplementations - Emit information for synthesized
+/// properties for an implementation.
+void CodeGenModule::EmitObjCPropertyImplementations(const
+ ObjCImplementationDecl *D) {
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = D->propimpl_begin(getContext()),
+ e = D->propimpl_end(getContext()); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ // Dynamic is just for type-checking.
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ // Determine which methods need to be implemented, some may have
+ // been overridden. Note that ::isSynthesized is not the method
+ // we want, that just indicates if the decl came from a
+ // property. What we want to know is if the method is defined in
+ // this implementation.
+ if (!D->getInstanceMethod(getContext(), PD->getGetterName()))
+ CodeGenFunction(*this).GenerateObjCGetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ if (!PD->isReadOnly() &&
+ !D->getInstanceMethod(getContext(), PD->getSetterName()))
+ CodeGenFunction(*this).GenerateObjCSetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ }
+ }
+}
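+
+// For example (illustrative): given
+//   @implementation Foo
+//   @synthesize bar;
+//   @end
+// a getter (and, for readwrite properties, a setter) is generated here
+// unless the implementation already defines -bar or -setBar: itself.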
+
+/// EmitNamespace - Emit all declarations in a namespace.
+void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
+ for (RecordDecl::decl_iterator I = ND->decls_begin(getContext()),
+ E = ND->decls_end(getContext());
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+/// EmitLinkageSpec - Emit all declarations in a linkage spec.
+void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
+ if (LSD->getLanguage() != LinkageSpecDecl::lang_c) {
+ ErrorUnsupported(LSD, "linkage spec");
+ return;
+ }
+
+ for (RecordDecl::decl_iterator I = LSD->decls_begin(getContext()),
+ E = LSD->decls_end(getContext());
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
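+
+// For example (illustrative): the contents of
+//   extern "C" { int f(void); }
+// are emitted normally, while any other language linkage (e.g. extern
+// "C++") is rejected with ErrorUnsupported above.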
+
+/// EmitTopLevelDecl - Emit code for a single top level declaration.
+void CodeGenModule::EmitTopLevelDecl(Decl *D) {
+ // If an error has occurred, stop code generation, but continue
+ // parsing and semantic analysis (to ensure all warnings and errors
+ // are emitted).
+ if (Diags.hasErrorOccurred())
+ return;
+
+ switch (D->getKind()) {
+ case Decl::CXXMethod:
+ case Decl::Function:
+ case Decl::Var:
+ EmitGlobal(GlobalDecl(cast<ValueDecl>(D)));
+ break;
+
+ // C++ Decls
+ case Decl::Namespace:
+ EmitNamespace(cast<NamespaceDecl>(D));
+ break;
+ case Decl::CXXConstructor:
+ EmitCXXConstructors(cast<CXXConstructorDecl>(D));
+ break;
+ case Decl::CXXDestructor:
+ EmitCXXDestructors(cast<CXXDestructorDecl>(D));
+ break;
+
+ // Objective-C Decls
+
+ // Forward declarations, no (immediate) code generation.
+ case Decl::ObjCClass:
+ case Decl::ObjCForwardProtocol:
+ case Decl::ObjCCategory:
+ case Decl::ObjCInterface:
+ break;
+
+ case Decl::ObjCProtocol:
+ Runtime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
+ break;
+
+ case Decl::ObjCCategoryImpl:
+ // Categories have properties but don't support synthesize so we
+ // can ignore them here.
+ Runtime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+ break;
+
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
+ EmitObjCPropertyImplementations(OMD);
+ Runtime->GenerateClass(OMD);
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(D);
+ // If this is not a prototype, emit the body.
+ if (OMD->getBody(getContext()))
+ CodeGenFunction(*this).GenerateObjCMethod(OMD);
+ break;
+ }
+ case Decl::ObjCCompatibleAlias:
+ // compatibility-alias is a directive and has no code gen.
+ break;
+
+ case Decl::LinkageSpec:
+ EmitLinkageSpec(cast<LinkageSpecDecl>(D));
+ break;
+
+ case Decl::FileScopeAsm: {
+ FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
+ std::string AsmString(AD->getAsmString()->getStrData(),
+ AD->getAsmString()->getByteLength());
+
+ const std::string &S = getModule().getModuleInlineAsm();
+ if (S.empty())
+ getModule().setModuleInlineAsm(AsmString);
+ else
+ getModule().setModuleInlineAsm(S + '\n' + AsmString);
+ break;
+ }
+
+ default:
+ // Make sure we handled everything we should, every other kind is a
+ // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
+ // function. Need to recode Decl::Kind to do that easily.
+ assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+ }
+}
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
new file mode 100644
index 0000000..4d50e89
--- /dev/null
+++ b/lib/CodeGen/CodeGenModule.h
@@ -0,0 +1,467 @@
+//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENMODULE_H
+#define CLANG_CODEGEN_CODEGENMODULE_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "CGBlocks.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CodeGenTypes.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/ValueHandle.h"
+#include <list>
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+}
+
+namespace clang {
+ class ASTContext;
+ class FunctionDecl;
+ class IdentifierInfo;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCProtocolDecl;
+ class ObjCEncodeExpr;
+ class BlockExpr;
+ class Decl;
+ class Expr;
+ class Stmt;
+ class StringLiteral;
+ class NamedDecl;
+ class ValueDecl;
+ class VarDecl;
+ class LangOptions;
+ class CompileOptions;
+ class Diagnostic;
+ class AnnotateAttr;
+ class CXXDestructorDecl;
+
+namespace CodeGen {
+
+ class CodeGenFunction;
+ class CGDebugInfo;
+ class CGObjCRuntime;
+
+/// GlobalDecl - represents a global declaration. This can either be a
+/// CXXConstructorDecl and the constructor type (Base, Complete),
+/// a CXXDestructorDecl and the destructor type (Base, Complete),
+/// or a regular VarDecl or FunctionDecl.
+class GlobalDecl {
+ llvm::PointerIntPair<const ValueDecl*, 2> Value;
+
+public:
+ GlobalDecl() {}
+
+ explicit GlobalDecl(const ValueDecl *VD) : Value(VD, 0) {
+ assert(!isa<CXXConstructorDecl>(VD) && "Use other ctor with ctor decls!");
+ assert(!isa<CXXDestructorDecl>(VD) && "Use other ctor with dtor decls!");
+ }
+ GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
+ : Value(D, Type) {}
+ GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
+ : Value(D, Type) {}
+
+ const ValueDecl *getDecl() const { return Value.getPointer(); }
+
+ CXXCtorType getCtorType() const {
+ assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
+ return static_cast<CXXCtorType>(Value.getInt());
+ }
+
+ CXXDtorType getDtorType() const {
+ assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
+ return static_cast<CXXDtorType>(Value.getInt());
+ }
+};
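+
+// Usage sketch (illustrative): the ctor/dtor type is packed into the two
+// low bits of the pointer, so for some CXXConstructorDecl *CD:
+//   GlobalDecl GD(CD, Ctor_Complete);
+//   GD.getCtorType(); // == Ctor_Complete
+// Plain functions and variables use the single-argument constructor.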
+
+/// CodeGenModule - This class organizes the cross-function state that is used
+/// while generating LLVM code.
+class CodeGenModule : public BlockModule {
+ CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+
+ typedef std::vector< std::pair<llvm::Constant*, int> > CtorList;
+
+ ASTContext &Context;
+ const LangOptions &Features;
+ const CompileOptions &CompileOpts;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ Diagnostic &Diags;
+ CodeGenTypes Types;
+ CGObjCRuntime* Runtime;
+ CGDebugInfo* DebugInfo;
+
+ llvm::Function *MemCpyFn;
+ llvm::Function *MemMoveFn;
+ llvm::Function *MemSetFn;
+
+ /// GlobalDeclMap - Mapping of decl names (represented as unique
+ /// character pointers from either the identifier table or the set
+ /// of mangled names) to global variables we have already
+  /// emitted. Note that the entries in this map are the actual
+  /// globals and therefore may not be of the same type as the decl;
+  /// they should be bitcast on retrieval. Also note that the
+ /// globals are keyed on their source mangled name, not the global name
+ /// (which may change with attributes such as asm-labels). The key
+ /// to this map should be generated using getMangledName().
+ ///
+ /// Note that this map always lines up exactly with the contents of the LLVM
+ /// IR symbol table, but this is quicker to query since it is doing uniqued
+ /// pointer lookups instead of full string lookups.
+ llvm::DenseMap<const char*, llvm::GlobalValue*> GlobalDeclMap;
+
+ /// \brief Contains the strings used for mangled names.
+ ///
+ /// FIXME: Eventually, this should map from the semantic/canonical
+ /// declaration for each global entity to its mangled name (if it
+ /// has one).
+ llvm::StringSet<> MangledNames;
+
+ /// DeferredDecls - This contains all the decls which have definitions but
+ /// which are deferred for emission and therefore should only be output if
+ /// they are actually used. If a decl is in this, then it is known to have
+ /// not been referenced yet. The key to this map is a uniqued mangled name.
+ llvm::DenseMap<const char*, GlobalDecl> DeferredDecls;
+
+ /// DeferredDeclsToEmit - This is a list of deferred decls which we have seen
+ /// that *are* actually referenced. These get code generated when the module
+ /// is done.
+ std::vector<GlobalDecl> DeferredDeclsToEmit;
+
+ /// LLVMUsed - List of global values which are required to be
+ /// present in the object file; bitcast to i8*. This is used for
+ /// forcing visibility of symbols which may otherwise be optimized
+ /// out.
+ std::vector<llvm::WeakVH> LLVMUsed;
+
+ /// GlobalCtors - Store the list of global constructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalCtors;
+
+ /// GlobalDtors - Store the list of global destructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalDtors;
+
+ std::vector<llvm::Constant*> Annotations;
+
+ llvm::StringMap<llvm::Constant*> CFConstantStringMap;
+ llvm::StringMap<llvm::Constant*> ConstantStringMap;
+
+ /// CFConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *CFConstantStringClassRef;
+public:
+ CodeGenModule(ASTContext &C, const CompileOptions &CompileOpts,
+ llvm::Module &M, const llvm::TargetData &TD, Diagnostic &Diags);
+
+ ~CodeGenModule();
+
+ /// Release - Finalize LLVM code generation.
+ void Release();
+
+ /// getObjCRuntime() - Return a reference to the configured
+ /// Objective-C runtime.
+ CGObjCRuntime &getObjCRuntime() {
+ assert(Runtime && "No Objective-C runtime has been configured.");
+ return *Runtime;
+ }
+
+ /// hasObjCRuntime() - Return true iff an Objective-C runtime has
+ /// been configured.
+ bool hasObjCRuntime() { return !!Runtime; }
+
+ CGDebugInfo *getDebugInfo() { return DebugInfo; }
+ ASTContext &getContext() const { return Context; }
+ const CompileOptions &getCompileOpts() const { return CompileOpts; }
+ const LangOptions &getLangOptions() const { return Features; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ Diagnostic &getDiags() const { return Diags; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+
+ /// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
+ LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;
+
+ /// setGlobalVisibility - Set the visibility for the given LLVM
+ /// GlobalValue.
+ void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+
+ /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+ /// given global variable. If Ty is non-null and if the global doesn't exist,
+  /// then it will be created with the specified type instead of whatever the
+ /// normal requested type would be.
+ llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
+ const llvm::Type *Ty = 0);
+
+ /// GetAddrOfFunction - Return the address of the given function. If Ty is
+ /// non-null, then this function will use the specified type if it has to
+ /// create it.
+ llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
+ const llvm::Type *Ty = 0);
+
+  /// GetStringForStringLiteral - Return the appropriate bytes for a string
+  /// literal, properly padded to match the literal type. If only the address
+  /// of a constant is needed, consider using GetAddrOfConstantStringFromLiteral.
+ std::string GetStringForStringLiteral(const StringLiteral *E);
+
+ /// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
+ /// for the given string literal.
+ llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
+
+ /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+ /// array for the given ObjCEncodeExpr node.
+ llvm::Constant *GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
+
+  /// GetAddrOfConstantString - Returns a pointer to a character array
+  /// containing the literal. Its contents are exactly those of the given
+  /// string, i.e. it will not be null terminated automatically; see
+  /// GetAddrOfConstantCString. Note that whether the result is actually a
+  /// pointer to an LLVM constant depends on Features.WritableStrings.
+ ///
+ /// The result has pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global
+ /// (if one is created).
+ llvm::Constant *GetAddrOfConstantString(const std::string& str,
+ const char *GlobalName=0);
+
+ /// GetAddrOfConstantCString - Returns a pointer to a character array
+ /// containing the literal and a terminating '\0' character. The result has
+ /// pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global (if one is
+ /// created).
+ llvm::Constant *GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName=0);
+
+ /// GetAddrOfCXXConstructor - Return the address of the constructor of the
+ /// given type.
+ llvm::Function *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type);
+
+  /// GetAddrOfCXXDestructor - Return the address of the destructor of the
+  /// given type.
+ llvm::Function *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ /// getBuiltinLibFunction - Given a builtin id for a function like
+ /// "__builtin_fabsf", return a Function* for "fabsf".
+ llvm::Value *getBuiltinLibFunction(unsigned BuiltinID);
+
+ llvm::Function *getMemCpyFn();
+ llvm::Function *getMemMoveFn();
+ llvm::Function *getMemSetFn();
+ llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
+ unsigned NumTys = 0);
+
+ /// EmitTopLevelDecl - Emit code for a single top level declaration.
+ void EmitTopLevelDecl(Decl *D);
+
+ /// AddUsedGlobal - Add a global which should be forced to be
+ /// present in the object file; these are emitted to the llvm.used
+ /// metadata global.
+ void AddUsedGlobal(llvm::GlobalValue *GV);
+
+ void AddAnnotation(llvm::Constant *C) { Annotations.push_back(C); }
+
+ /// CreateRuntimeFunction - Create a new runtime function with the specified
+ /// type and name.
+ llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
+ const char *Name);
+ /// CreateRuntimeVariable - Create a new runtime global variable with the
+ /// specified type and name.
+ llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
+ const char *Name);
+
+ void UpdateCompletedType(const TagDecl *TD) {
+ // Make sure that this type is translated.
+ Types.UpdateCompletedType(TD);
+ }
+
+ /// EmitConstantExpr - Try to emit the given expression as a
+ /// constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitNullConstant - Return the result of value-initializing the given
+ /// type, i.e. a null expression of the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstant(QualType T);
+
+ llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA, unsigned LineNo);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified decl yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError=false);
+
+ /// SetInternalFunctionAttributes - Set the attributes on the LLVM
+ /// function for the given decl and function info. This applies
+ /// attributes necessary for handling the ABI as well as user
+ /// specified attributes like section.
+ void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+ const CGFunctionInfo &FI);
+
+ /// SetLLVMFunctionAttributes - Set the LLVM function attributes
+ /// (sext, zext, etc).
+ void SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F);
+
+ /// SetLLVMFunctionAttributesForDefinition - Set the LLVM function attributes
+  /// which only apply to a function definition.
+ void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+
+ /// ReturnTypeUsesSret - Return true iff the given type uses 'sret' when used
+ /// as a return type.
+ bool ReturnTypeUsesSret(const CGFunctionInfo &FI);
+
+ void ConstructAttributeList(const CGFunctionInfo &Info,
+ const Decl *TargetDecl,
+ AttributeListType &PAL);
+
+ const char *getMangledName(const GlobalDecl &D);
+
+ const char *getMangledName(const NamedDecl *ND);
+ const char *getMangledCXXCtorName(const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ const char *getMangledCXXDtorName(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ void EmitTentativeDefinition(const VarDecl *D);
+
+ enum GVALinkage {
+ GVA_Internal,
+ GVA_C99Inline,
+ GVA_CXXInline,
+ GVA_StrongExternal
+ };
+
+private:
+ /// UniqueMangledName - Unique a name by (if necessary) inserting it into the
+ /// MangledNames string map.
+ const char *UniqueMangledName(const char *NameStart, const char *NameEnd);
+
+ llvm::Constant *GetOrCreateLLVMFunction(const char *MangledName,
+ const llvm::Type *Ty,
+ GlobalDecl D);
+ llvm::Constant *GetOrCreateLLVMGlobal(const char *MangledName,
+ const llvm::PointerType *PTy,
+ const VarDecl *D);
+
+ /// SetCommonAttributes - Set attributes which are common to any
+ /// form of a global definition (alias, Objective-C method,
+ /// function, global variable).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ /// SetFunctionDefinitionAttributes - Set attributes for a global definition.
+ void SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV);
+
+ /// SetFunctionAttributes - Set function attributes for a function
+ /// declaration.
+ void SetFunctionAttributes(const FunctionDecl *FD,
+ llvm::Function *F,
+ bool IsIncompleteFunction);
+
+  /// EmitGlobal - Emit code for a single global function or var decl. Forward
+ /// declarations are emitted lazily.
+ void EmitGlobal(GlobalDecl D);
+
+ void EmitGlobalDefinition(GlobalDecl D);
+
+ void EmitGlobalFunctionDefinition(GlobalDecl GD);
+ void EmitGlobalVarDefinition(const VarDecl *D);
+ void EmitAliasDefinition(const ValueDecl *D);
+ void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+
+ // C++ related functions.
+
+ void EmitNamespace(const NamespaceDecl *D);
+ void EmitLinkageSpec(const LinkageSpecDecl *D);
+
+ /// EmitCXXConstructors - Emit constructors (base, complete) from a
+ /// C++ constructor Decl.
+ void EmitCXXConstructors(const CXXConstructorDecl *D);
+
+ /// EmitCXXConstructor - Emit a single constructor with the given type from
+ /// a C++ constructor Decl.
+ void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
+
+ /// EmitCXXDestructors - Emit destructors (base, complete) from a
+ /// C++ destructor Decl.
+ void EmitCXXDestructors(const CXXDestructorDecl *D);
+
+ /// EmitCXXDestructor - Emit a single destructor with the given type from
+ /// a C++ destructor Decl.
+ void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+ // FIXME: Hardcoding priority here is gross.
+ void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
+
+ /// EmitCtorList - Generates a global array of functions and priorities using
+ /// the given list and name. This array will have appending linkage and is
+ /// suitable for use as a LLVM constructor or destructor array.
+ void EmitCtorList(const CtorList &Fns, const char *GlobalName);
+
+ void EmitAnnotations(void);
+
+ /// EmitDeferred - Emit any needed decls for which code generation
+ /// was deferred.
+ void EmitDeferred(void);
+
+ /// EmitLLVMUsed - Emit the llvm.used metadata used to force
+ /// references to global which may otherwise be optimized out.
+ void EmitLLVMUsed(void);
+
+ /// MayDeferGeneration - Determine if the given decl can be emitted
+ /// lazily; this is only relevant for definitions. The given decl
+ /// must be either a function or var decl.
+ bool MayDeferGeneration(const ValueDecl *D);
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
new file mode 100644
index 0000000..af791f6
--- /dev/null
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -0,0 +1,614 @@
+//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTypes.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+
+#include "CGCall.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+  /// RecordOrganizer - This helper class, used by CGRecordLayout, lays out
+  /// structs and unions. It manages transient information used during layout.
+  /// FIXME: Handle field alignments. Handle packed structs.
+ class RecordOrganizer {
+ public:
+ explicit RecordOrganizer(CodeGenTypes &Types, const RecordDecl& Record) :
+ CGT(Types), RD(Record), STy(NULL) {}
+
+ /// layoutStructFields - Do the actual work and lay out all fields. Create
+ /// corresponding llvm struct type. This should be invoked only after
+ /// all fields are added.
+ void layoutStructFields(const ASTRecordLayout &RL);
+
+ /// layoutUnionFields - Do the actual work and lay out all fields. Create
+ /// corresponding llvm struct type. This should be invoked only after
+ /// all fields are added.
+ void layoutUnionFields(const ASTRecordLayout &RL);
+
+ /// getLLVMType - Return associated llvm struct type. This may be NULL
+ /// if fields are not laid out.
+ llvm::Type *getLLVMType() const {
+ return STy;
+ }
+
+ llvm::SmallSet<unsigned, 8> &getPaddingFields() {
+ return PaddingFields;
+ }
+
+ private:
+ CodeGenTypes &CGT;
+ const RecordDecl& RD;
+ llvm::Type *STy;
+ llvm::SmallSet<unsigned, 8> PaddingFields;
+ };
+}
+
+CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
+ const llvm::TargetData &TD)
+ : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
+ TheABIInfo(0) {
+}
+
+CodeGenTypes::~CodeGenTypes() {
+ for(llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
+ I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
+ I != E; ++I)
+ delete I->second;
+ CGRecordLayouts.clear();
+}
+
+/// ConvertType - Convert the specified type to its LLVM form.
+const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+ llvm::PATypeHolder Result = ConvertTypeRecursive(T);
+
+  // Any pointers that were converted deferred evaluation of their pointee
+  // type, creating an opaque type instead. This is in order to avoid problems
+  // with circular types. Loop through all these deferred pointees, if any,
+  // and resolve them now.
+ while (!PointersToResolve.empty()) {
+ std::pair<QualType, llvm::OpaqueType*> P =
+ PointersToResolve.back();
+ PointersToResolve.pop_back();
+    // We can handle bare pointers here because we know that the only pointers
+    // to the Opaque type are P.second and from other types. Refining the
+    // opaque type away will invalidate P.second, but we don't mind :).
+ const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
+ P.second->refineAbstractTypeTo(NT);
+ }
+
+ return Result;
+}
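+
+// For example (illustrative): converting
+//   struct Node { struct Node *next; };
+// initially maps 'next' to a pointer to an opaque type; once the struct
+// body has been built, the loop above refines that opaque pointee to the
+// struct itself, closing the cycle without infinite recursion.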
+
+const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ // See if type is already cached.
+ llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
+ I = TypeCache.find(T.getTypePtr());
+  // If the type is found in the map and this is not a definition for an
+  // opaque placeholder type, then use it. Otherwise, convert type T.
+ if (I != TypeCache.end())
+ return I->second.get();
+
+ const llvm::Type *ResultType = ConvertNewType(T);
+ TypeCache.insert(std::make_pair(T.getTypePtr(),
+ llvm::PATypeHolder(ResultType)));
+ return ResultType;
+}
+
+const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
+ const llvm::Type *ResultType = ConvertTypeRecursive(T);
+ if (ResultType == llvm::Type::Int1Ty)
+ return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
+ return ResultType;
+}
+
+/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+/// ConvertType in that it is used to convert to the memory representation for
+/// a type. For example, the scalar representation for _Bool is i1, but the
+/// memory representation is usually i8 or i32, depending on the target.
+const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
+ const llvm::Type *R = ConvertType(T);
+
+ // If this is a non-bool type, don't map it.
+ if (R != llvm::Type::Int1Ty)
+ return R;
+
+ // Otherwise, return an integer of the target-specified size.
+ return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
+
+}
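+
+// For example (illustrative): for T == _Bool, ConvertType yields i1 (the
+// scalar form used for values in registers), while ConvertTypeForMem
+// yields i8 on a typical target where Context.getTypeSize(T) == 8.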
+
+// Code to verify a given function type is complete, i.e. the return type
+// and all of the argument types are complete.
+static const TagType *VerifyFuncTypeComplete(const Type* T) {
+ const FunctionType *FT = cast<FunctionType>(T);
+ if (const TagType* TT = FT->getResultType()->getAsTagType())
+ if (!TT->getDecl()->isDefinition())
+ return TT;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
+ for (unsigned i = 0; i < FPT->getNumArgs(); i++)
+ if (const TagType* TT = FPT->getArgType(i)->getAsTagType())
+ if (!TT->getDecl()->isDefinition())
+ return TT;
+ return 0;
+}
+
+/// UpdateCompletedType - When we find the full definition for a TagDecl,
+/// replace the 'opaque' type we previously made for it if applicable.
+void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ const Type *Key =
+ Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ TagDeclTypes.find(Key);
+ if (TDTI == TagDeclTypes.end()) return;
+
+ // Remember the opaque LLVM type for this tagdecl.
+ llvm::PATypeHolder OpaqueHolder = TDTI->second;
+ assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
+ "Updating compilation of an already non-opaque type?");
+
+ // Remove it from TagDeclTypes so that it will be regenerated.
+ TagDeclTypes.erase(TDTI);
+
+ // Generate the new type.
+ const llvm::Type *NT = ConvertTagDeclType(TD);
+
+ // Refine the old opaque type to its new definition.
+ cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
+
+ // Since we just completed a tag type, check to see if any function types
+ // were completed along with the tag type.
+ // FIXME: This is very inefficient; if we track which function types depend
+ // on which tag types, though, it should be reasonably efficient.
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
+ for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
+ if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
+ // This function type still depends on an incomplete tag type; make sure
+ // that tag type has an associated opaque type.
+ ConvertTagDeclType(TT->getDecl());
+ } else {
+ // This function no longer depends on an incomplete tag type; create the
+ // function type, and refine the opaque type to the new function type.
+ llvm::PATypeHolder OpaqueHolder = i->second;
+ const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
+ cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
+ FunctionTypes.erase(i);
+ }
+ }
+}
+
+static const llvm::Type* getTypeForFormat(const llvm::fltSemantics &format) {
+ if (&format == &llvm::APFloat::IEEEsingle)
+ return llvm::Type::FloatTy;
+ if (&format == &llvm::APFloat::IEEEdouble)
+ return llvm::Type::DoubleTy;
+ if (&format == &llvm::APFloat::IEEEquad)
+ return llvm::Type::FP128Ty;
+ if (&format == &llvm::APFloat::PPCDoubleDouble)
+ return llvm::Type::PPC_FP128Ty;
+ if (&format == &llvm::APFloat::x87DoubleExtended)
+ return llvm::Type::X86_FP80Ty;
+ assert(0 && "Unknown float format!");
+ return 0;
+}
+
+const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
+ const clang::Type &Ty = *Context.getCanonicalType(T);
+
+ switch (Ty.getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical or dependent types aren't possible.");
+ break;
+
+ case Type::Builtin: {
+ switch (cast<BuiltinType>(Ty).getKind()) {
+ default: assert(0 && "Unknown builtin type!");
+ case BuiltinType::Void:
+ // LLVM void type can only be used as the result of a function call. Just
+ // map to the same as char.
+ return llvm::IntegerType::get(8);
+
+ case BuiltinType::Bool:
+ // Note that we always return bool as i1 for use as a scalar type.
+ return llvm::Type::Int1Ty;
+
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::WChar:
+ return llvm::IntegerType::get(
+ static_cast<unsigned>(Context.getTypeSize(T)));
+
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ return getTypeForFormat(Context.getFloatTypeSemantics(T));
+
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ return llvm::IntegerType::get(128);
+ }
+ break;
+ }
+ case Type::FixedWidthInt:
+ return llvm::IntegerType::get(cast<FixedWidthIntType>(T)->getWidth());
+ case Type::Complex: {
+ const llvm::Type *EltTy =
+ ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
+ return llvm::StructType::get(EltTy, EltTy, NULL);
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType &RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy.getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
+ PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ }
+ case Type::Pointer: {
+ const PointerType &PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy.getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
+ PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ }
+
+ case Type::VariableArray: {
+ const VariableArrayType &A = cast<VariableArrayType>(Ty);
+ assert(A.getIndexTypeQualifier() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // VLAs resolve to the innermost element type; this matches
+ // the return of alloca, and there isn't any obviously better choice.
+ return ConvertTypeForMemRecursive(A.getElementType());
+ }
+ case Type::IncompleteArray: {
+ const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
+ assert(A.getIndexTypeQualifier() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // int X[] -> [0 x int]
+ return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), 0);
+ }
+ case Type::ConstantArray: {
+ const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
+ const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
+ return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType &VT = cast<VectorType>(Ty);
+ return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
+ VT.getNumElements());
+ }
+ case Type::FunctionNoProto:
+ case Type::FunctionProto: {
+ // First, check whether we can build the full function type.
+ if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
+ // This function's type depends on an incomplete tag type; make sure
+ // we have an opaque type corresponding to the tag type.
+ ConvertTagDeclType(TT->getDecl());
+ // Create an opaque type for this function type, save it, and return it.
+ llvm::Type *ResultType = llvm::OpaqueType::get();
+ FunctionTypes.insert(std::make_pair(&Ty, ResultType));
+ return ResultType;
+ }
+ // The function type can be built; call the appropriate routines to
+ // build it.
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
+ return GetFunctionType(getFunctionInfo(FPT), FPT->isVariadic());
+
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+ return GetFunctionType(getFunctionInfo(FNPT), true);
+ }
+
+ case Type::ExtQual:
+ return
+ ConvertTypeRecursive(QualType(cast<ExtQualType>(Ty).getBaseType(), 0));
+
+ case Type::ObjCQualifiedInterface: {
+ // Lower foo<P1,P2> just like foo.
+ ObjCInterfaceDecl *ID = cast<ObjCQualifiedInterfaceType>(Ty).getDecl();
+ return ConvertTypeRecursive(Context.getObjCInterfaceType(ID));
+ }
+
+ case Type::ObjCInterface: {
+ // Objective-C interfaces are always opaque (outside of the
+ // runtime, which can do whatever it likes); we never refine
+ // these.
+ const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
+ if (!T)
+ T = llvm::OpaqueType::get();
+ return T;
+ }
+
+ case Type::ObjCQualifiedId:
+ // Protocols don't influence the LLVM type.
+ return ConvertTypeRecursive(Context.getObjCIdType());
+
+ case Type::Record:
+ case Type::Enum: {
+ const TagDecl *TD = cast<TagType>(Ty).getDecl();
+ const llvm::Type *Res = ConvertTagDeclType(TD);
+
+ std::string TypeName(TD->getKindName());
+ TypeName += '.';
+
+ // Name the codegen type after the typedef name
+ // if there is no tag type name available
+ if (TD->getIdentifier())
+ TypeName += TD->getNameAsString();
+ else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
+ TypeName += TdT->getDecl()->getNameAsString();
+ else
+ TypeName += "anon";
+
+ TheModule.addTypeName(TypeName, Res);
+ return Res;
+ }
+
+ case Type::BlockPointer: {
+ const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
+ PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
+ return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
+ }
+
+ case Type::MemberPointer: {
+ // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
+ // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
+ // If we ever want to support other ABIs this needs to be abstracted.
+
+ QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
+ if (ETy->isFunctionType()) {
+ return llvm::StructType::get(ConvertType(Context.getPointerDiffType()),
+ ConvertType(Context.getPointerDiffType()),
+ NULL);
+ } else
+ return ConvertType(Context.getPointerDiffType());
+ }
+
+ case Type::TemplateSpecialization:
+ assert(false && "Dependent types can't get here");
+ }
+
+ // FIXME: implement.
+ return llvm::OpaqueType::get();
+}
+
+/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+/// enum.
+const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
+  // TagDecls are not necessarily unique; instead, use the (clang)
+  // type connected to the decl.
+ const Type *Key =
+ Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ TagDeclTypes.find(Key);
+
+ // If we've already compiled this tag type, use the previous definition.
+ if (TDTI != TagDeclTypes.end())
+ return TDTI->second;
+
+ // If this is still a forward definition, just define an opaque type to use
+ // for this tagged decl.
+ if (!TD->isDefinition()) {
+ llvm::Type *ResultType = llvm::OpaqueType::get();
+ TagDeclTypes.insert(std::make_pair(Key, ResultType));
+ return ResultType;
+ }
+
+ // Okay, this is a definition of a type. Compile the implementation now.
+
+ if (TD->isEnum()) {
+ // Don't bother storing enums in TagDeclTypes.
+ return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
+ }
+
+ // This decl could well be recursive. In this case, insert an opaque
+ // definition of this type, which the recursive uses will get. We will then
+ // refine this opaque version later.
+
+ // Create new OpaqueType now for later use in case this is a recursive
+ // type. This will later be refined to the actual type.
+ llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get();
+ TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
+
+ const llvm::Type *ResultType;
+ const RecordDecl *RD = cast<const RecordDecl>(TD);
+
+ // There isn't any extra information for empty structures/unions.
+ if (RD->field_empty(getContext())) {
+ ResultType = llvm::StructType::get(std::vector<const llvm::Type*>());
+ } else {
+ // Layout fields.
+ RecordOrganizer RO(*this, *RD);
+
+ if (TD->isStruct() || TD->isClass())
+ RO.layoutStructFields(Context.getASTRecordLayout(RD));
+ else {
+ assert(TD->isUnion() && "unknown tag decl kind!");
+ RO.layoutUnionFields(Context.getASTRecordLayout(RD));
+ }
+
+ // Get llvm::StructType.
+ const Type *Key =
+ Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
+ CGRecordLayouts[Key] = new CGRecordLayout(RO.getLLVMType(),
+ RO.getPaddingFields());
+ ResultType = RO.getLLVMType();
+ }
+
+ // Refine our Opaque type to ResultType. This can invalidate ResultType, so
+ // make sure to read the result out of the holder.
+ cast<llvm::OpaqueType>(ResultHolder.get())
+ ->refineAbstractTypeTo(ResultType);
+
+ return ResultHolder.get();
+}
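+
+// For example (illustrative): a forward declaration
+//   struct S;
+// maps to a fresh LLVM opaque type here; when the definition is later
+// seen, UpdateCompletedType refines that opaque type to the real layout.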
+
+/// getLLVMFieldNo - Return llvm::StructType element number
+/// that corresponds to the field FD.
+unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
+ llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD);
+ assert (I != FieldInfo.end() && "Unable to find field info");
+ return I->second;
+}
+
+/// addFieldInfo - Assign field number to field FD.
+void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
+ FieldInfo[FD] = No;
+}
+
+/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD.
+CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
+ llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
+ I = BitFields.find(FD);
+ assert (I != BitFields.end() && "Unable to find bitfield info");
+ return I->second;
+}
+
+/// addBitFieldInfo - Assign a start bit and a size to field FD.
+void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned Begin,
+ unsigned Size) {
+ BitFields.insert(std::make_pair(FD, BitFieldInfo(Begin, Size)));
+}
+
+/// getCGRecordLayout - Return record layout info for the given llvm::Type.
+const CGRecordLayout *
+CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
+ const Type *Key =
+ Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
+ llvm::DenseMap<const Type*, CGRecordLayout *>::iterator I
+ = CGRecordLayouts.find(Key);
+ assert (I != CGRecordLayouts.end()
+ && "Unable to find record layout information for type");
+ return I->second;
+}
+
+/// layoutStructFields - Do the actual work and lay out all fields. Create
+/// corresponding llvm struct type.
+/// Note that this doesn't actually try to do struct layout; it depends on
+/// the layout built by the AST. (We have to do struct layout to do Sema,
+/// and there's no point in duplicating the work.)
+void RecordOrganizer::layoutStructFields(const ASTRecordLayout &RL) {
+ // FIXME: This code currently always generates packed structures.
+ // Unpacked structures are more readable, and sometimes more efficient!
+ // (But note that any changes here are likely to impact CGExprConstant,
+ // which makes some messy assumptions.)
+ uint64_t llvmSize = 0;
+ // FIXME: Make this a SmallVector
+ std::vector<const llvm::Type*> LLVMFields;
+
+ unsigned curField = 0;
+ for (RecordDecl::field_iterator Field = RD.field_begin(CGT.getContext()),
+ FieldEnd = RD.field_end(CGT.getContext());
+ Field != FieldEnd; ++Field) {
+ uint64_t offset = RL.getFieldOffset(curField);
+ const llvm::Type *Ty = CGT.ConvertTypeForMemRecursive(Field->getType());
+ uint64_t size = CGT.getTargetData().getTypeAllocSizeInBits(Ty);
+
+ if (Field->isBitField()) {
+ uint64_t BitFieldSize =
+ Field->getBitWidth()->EvaluateAsInt(CGT.getContext()).getZExtValue();
+
+ // Bitfield field info is different from other field info;
+ // it actually ignores the underlying LLVM struct because
+ // there isn't any convenient mapping.
+ CGT.addFieldInfo(*Field, offset / size);
+ CGT.addBitFieldInfo(*Field, offset % size, BitFieldSize);
+ } else {
+ // Put the element into the struct. This would be simpler
+ // if we didn't bother, but it seems a bit too strange to
+ // allocate all structs as i8 arrays.
+ while (llvmSize < offset) {
+ LLVMFields.push_back(llvm::Type::Int8Ty);
+ llvmSize += 8;
+ }
+
+ llvmSize += size;
+ CGT.addFieldInfo(*Field, LLVMFields.size());
+ LLVMFields.push_back(Ty);
+ }
+ ++curField;
+ }
+
+ while (llvmSize < RL.getSize()) {
+ LLVMFields.push_back(llvm::Type::Int8Ty);
+ llvmSize += 8;
+ }
+
+ STy = llvm::StructType::get(LLVMFields, true);
+ assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
+}
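+
+// For example (illustrative, assuming a 32-bit int and the packed layout
+// generated above):
+//   struct S { char c; int i; };
+// lowers to <{ i8, i8, i8, i8, i32 }>, where the three extra i8 fields
+// are the padding inserted by the 'while (llvmSize < offset)' loop.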
+
+/// layoutUnionFields - Do the actual work and lay out all fields. Create
+/// corresponding llvm struct type. This should be invoked only after
+/// all fields are added.
+void RecordOrganizer::layoutUnionFields(const ASTRecordLayout &RL) {
+ unsigned curField = 0;
+ for (RecordDecl::field_iterator Field = RD.field_begin(CGT.getContext()),
+ FieldEnd = RD.field_end(CGT.getContext());
+ Field != FieldEnd; ++Field) {
+ // The offset should usually be zero, but bitfields could be strange
+ uint64_t offset = RL.getFieldOffset(curField);
+ CGT.ConvertTypeRecursive(Field->getType());
+
+ if (Field->isBitField()) {
+ Expr *BitWidth = Field->getBitWidth();
+ uint64_t BitFieldSize =
+ BitWidth->EvaluateAsInt(CGT.getContext()).getZExtValue();
+
+ CGT.addFieldInfo(*Field, 0);
+ CGT.addBitFieldInfo(*Field, offset, BitFieldSize);
+ } else {
+ CGT.addFieldInfo(*Field, 0);
+ }
+ ++curField;
+ }
+
+ // This looks stupid, but it is correct in the sense that
+ // it works no matter how complicated the sizes and alignments
+ // of the union elements are. The natural alignment
+ // of the result doesn't matter because anyone allocating
+ // structures should be aligning them appropriately anyway.
+ // FIXME: We can be a bit more intuitive in a lot of cases.
+ // FIXME: Make this a struct type to work around PR2399; the
+ // C backend doesn't like structs using array types.
+ std::vector<const llvm::Type*> LLVMFields;
+ LLVMFields.push_back(llvm::ArrayType::get(llvm::Type::Int8Ty,
+ RL.getSize() / 8));
+ STy = llvm::StructType::get(LLVMFields, true);
+ assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
+}
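+
+// For example (illustrative): on a typical 32-bit target
+//   union U { char c; int i; };
+// lowers to the 4-byte blob <{ [4 x i8] }>; member accesses are emitted
+// as casts of the union's address, since every field is assigned LLVM
+// field number 0 above.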
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 0000000..b72d8e9
--- /dev/null
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,212 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTYPES_H
+#define CLANG_CODEGEN_CODEGENTYPES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include <vector>
+
+#include "CGCall.h"
+
+namespace llvm {
+ class FunctionType;
+ class Module;
+ class OpaqueType;
+ class PATypeHolder;
+ class TargetData;
+ class Type;
+}
+
+namespace clang {
+ class ABIInfo;
+ class ASTContext;
+ class CXXMethodDecl;
+ class FieldDecl;
+ class FunctionProtoType;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class PointerType;
+ class QualType;
+ class RecordDecl;
+ class TagDecl;
+ class TargetInfo;
+ class Type;
+
+namespace CodeGen {
+ class CodeGenTypes;
+
+ /// CGRecordLayout - This class handles struct and union layout info while
+ /// lowering AST types to LLVM types.
+ class CGRecordLayout {
+ CGRecordLayout(); // DO NOT IMPLEMENT
+ public:
+ CGRecordLayout(llvm::Type *T, llvm::SmallSet<unsigned, 8> &PF)
+ : STy(T), PaddingFields(PF) {
+      // FIXME: Collect info about fields that require adjustments
+      // (i.e. fields that do not directly map to llvm struct fields.)
+ }
+
+ /// getLLVMType - Return llvm type associated with this record.
+ llvm::Type *getLLVMType() const {
+ return STy;
+ }
+
+ bool isPaddingField(unsigned No) const {
+ return PaddingFields.count(No) != 0;
+ }
+
+ unsigned getNumPaddingFields() {
+ return PaddingFields.size();
+ }
+
+ private:
+ llvm::Type *STy;
+ llvm::SmallSet<unsigned, 8> PaddingFields;
+ };
+
+/// CodeGenTypes - This class organizes the cross-module state that is used
+/// while lowering AST types to LLVM types.
+class CodeGenTypes {
+ ASTContext &Context;
+ TargetInfo &Target;
+ llvm::Module& TheModule;
+ const llvm::TargetData& TheTargetData;
+ mutable const ABIInfo* TheABIInfo;
+
+ llvm::SmallVector<std::pair<QualType,
+ llvm::OpaqueType *>, 8> PointersToResolve;
+
+ llvm::DenseMap<const Type*, llvm::PATypeHolder> TagDeclTypes;
+
+ llvm::DenseMap<const Type*, llvm::PATypeHolder> FunctionTypes;
+
+ /// The opaque type map for Objective-C interfaces. All direct
+ /// manipulation is done by the runtime interfaces, which are
+ /// responsible for coercing to the appropriate type; these opaque
+ /// types are never refined.
+ llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes;
+
+  /// CGRecordLayouts - This maps a clang record type to its corresponding
+  /// record layout info.
+  /// FIXME: If CGRecordLayout is less than 16 bytes then inline it in
+  /// the map.
+ llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+
+  /// FieldInfo - This maps a struct field to its corresponding llvm struct
+  /// type field number. This info is populated by the record organizer.
+ llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+ /// FunctionInfos - Hold memoized CGFunctionInfo results.
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+
+public:
+ class BitFieldInfo {
+ public:
+ explicit BitFieldInfo(unsigned short B, unsigned short S)
+ : Begin(B), Size(S) {}
+
+ unsigned short Begin;
+ unsigned short Size;
+ };
+
+private:
+ llvm::DenseMap<const FieldDecl *, BitFieldInfo> BitFields;
+
+  /// TypeCache - This map keeps a cache of llvm::Types (through PATypeHolder),
+  /// mapping clang::Types to their corresponding llvm::Types.
+  /// llvm::PATypeHolder is used instead of llvm::Type because it allows us to
+  /// bypass potential dangling type pointers due to type refinement on the
+  /// LLVM side.
+ llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
+
+  /// ConvertNewType - Convert type T into a llvm::Type. Do not use this
+  /// method directly because it does not do any type caching. It is available
+  /// only to ConvertType(); ConvertType() is the preferred interface for
+  /// converting type T into a llvm::Type.
+ const llvm::Type *ConvertNewType(QualType T);
+public:
+ CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD);
+ ~CodeGenTypes();
+
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ TargetInfo &getTarget() const { return Target; }
+ ASTContext &getContext() const { return Context; }
+ const ABIInfo &getABIInfo() const;
+
+ /// ConvertType - Convert type T into a llvm::Type.
+ const llvm::Type *ConvertType(QualType T);
+ const llvm::Type *ConvertTypeRecursive(QualType T);
+
+ /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+ /// ConvertType in that it is used to convert to the memory representation for
+ /// a type. For example, the scalar representation for _Bool is i1, but the
+ /// memory representation is usually i8 or i32, depending on the target.
+ const llvm::Type *ConvertTypeForMem(QualType T);
+ const llvm::Type *ConvertTypeForMemRecursive(QualType T);
+
+ /// GetFunctionType - Get the LLVM function type for \arg Info.
+ const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
+ bool IsVariadic);
+
+ const CGRecordLayout *getCGRecordLayout(const TagDecl*) const;
+
+ /// getLLVMFieldNo - Return llvm::StructType element number
+ /// that corresponds to the field FD.
+ unsigned getLLVMFieldNo(const FieldDecl *FD);
+
+ /// UpdateCompletedType - When we find the full definition for a TagDecl,
+ /// replace the 'opaque' type we previously made for it if applicable.
+ void UpdateCompletedType(const TagDecl *TD);
+
+ /// getFunctionInfo - Get the CGFunctionInfo for this function signature.
+ const CGFunctionInfo &getFunctionInfo(QualType RetTy,
+ const llvm::SmallVector<QualType,16>
+ &ArgTys);
+
+ const CGFunctionInfo &getFunctionInfo(const FunctionNoProtoType *FTNP);
+ const CGFunctionInfo &getFunctionInfo(const FunctionProtoType *FTP);
+ const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
+ const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
+ const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const CallArgList &Args);
+public:
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args);
+
+public: // These are internal details of CGT that shouldn't be used externally.
+ /// addFieldInfo - Assign field number to field FD.
+ void addFieldInfo(const FieldDecl *FD, unsigned No);
+
+ /// addBitFieldInfo - Assign a start bit and a size to field FD.
+ void addBitFieldInfo(const FieldDecl *FD, unsigned Begin, unsigned Size);
+
+ /// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field
+ /// FD.
+ BitFieldInfo getBitFieldInfo(const FieldDecl *FD);
+
+ /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+ /// enum.
+ const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
+
+ /// GetExpandedTypes - Expand the type \arg Ty into the LLVM
+ /// argument types it would be passed as on the provided vector \arg
+ /// ArgTys. See ABIArgInfo::Expand.
+ void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/Makefile b/lib/CodeGen/Makefile
new file mode 100644
index 0000000..e716fe7
--- /dev/null
+++ b/lib/CodeGen/Makefile
@@ -0,0 +1,23 @@
+##===- clang/lib/CodeGen/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST -> LLVM code generation library for the
+# C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangCodeGen
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
new file mode 100644
index 0000000..6ee1223
--- /dev/null
+++ b/lib/CodeGen/Mangle.cpp
@@ -0,0 +1,772 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ class VISIBILITY_HIDDEN CXXNameMangler {
+ ASTContext &Context;
+ llvm::raw_ostream &Out;
+
+ const CXXMethodDecl *Structor;
+ unsigned StructorType;
+ CXXCtorType CtorType;
+
+ public:
+ CXXNameMangler(ASTContext &C, llvm::raw_ostream &os)
+ : Context(C), Out(os), Structor(0), StructorType(0) { }
+
+ bool mangle(const NamedDecl *D);
+ void mangleGuardVariable(const VarDecl *D);
+
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type);
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+ private:
+ bool mangleFunctionDecl(const FunctionDecl *FD);
+
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleName(const NamedDecl *ND);
+ void mangleUnqualifiedName(const NamedDecl *ND);
+ void mangleSourceName(const IdentifierInfo *II);
+ void mangleLocalName(const NamedDecl *ND);
+ void mangleNestedName(const NamedDecl *ND);
+ void manglePrefix(const DeclContext *DC);
+ void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
+ void mangleCVQualifiers(unsigned Quals);
+ void mangleType(QualType T);
+ void mangleType(const BuiltinType *T);
+ void mangleType(const FunctionType *T);
+ void mangleBareFunctionType(const FunctionType *T, bool MangleReturnType);
+ void mangleType(const TagType *T);
+ void mangleType(const ArrayType *T);
+ void mangleType(const MemberPointerType *T);
+ void mangleType(const TemplateTypeParmType *T);
+ void mangleType(const ObjCInterfaceType *T);
+ void mangleExpression(Expr *E);
+ void mangleCXXCtorType(CXXCtorType T);
+ void mangleCXXDtorType(CXXDtorType T);
+
+ void mangleTemplateArgumentList(const TemplateArgumentList &L);
+ void mangleTemplateArgument(const TemplateArgument &A);
+ };
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool CXXNameMangler::mangleFunctionDecl(const FunctionDecl *FD) {
+ // Clang's "overloadable" attribute extension to C/C++ implies
+ // name mangling (always).
+ if (FD->hasAttr<OverloadableAttr>()) {
+ ; // fall into mangling code unconditionally.
+ } else if (// C functions are not mangled
+ !Context.getLangOptions().CPlusPlus ||
+ // "main" is not mangled in C++
+ FD->isMain() ||
+ // No mangling in an "implicit extern C" header.
+ (FD->getLocation().isValid() &&
+ Context.getSourceManager().getFileCharacteristic(FD->getLocation()))
+ == SrcMgr::C_ExternCSystem ||
+ // No name mangling in a C linkage specification.
+ isInCLinkageSpecification(FD))
+ return false;
+
+ // If we get here, mangle the decl name!
+ Out << "_Z";
+ mangleFunctionEncoding(FD);
+ return true;
+}
+
+bool CXXNameMangler::mangle(const NamedDecl *D) {
+ // Any decl can be declared with __asm("foo") on it, and this takes
+ // precedence over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+ Out << ALA->getLabel();
+ return true;
+ }
+
+ // <mangled-name> ::= _Z <encoding>
+ // ::= <data name>
+ // ::= <special-name>
+
+ // FIXME: Actually use a visitor to decode these?
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return mangleFunctionDecl(FD);
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!Context.getLangOptions().CPlusPlus ||
+ isInCLinkageSpecification(D) ||
+ D->getDeclContext()->isTranslationUnit())
+ return false;
+
+ Out << "_Z";
+ mangleName(VD);
+ return true;
+ }
+
+ return false;
+}
+
+void CXXNameMangler::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ assert(!Structor && "Structor already set!");
+ Structor = D;
+ StructorType = Type;
+
+ mangle(D);
+}
+
+void CXXNameMangler::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ assert(!Structor && "Structor already set!");
+ Structor = D;
+ StructorType = Type;
+
+ mangle(D);
+}
+
+void CXXNameMangler::mangleGuardVariable(const VarDecl *D) {
+ // <special-name> ::= GV <object name> # Guard variable for one-time
+ // # initialization
+
+ Out << "_ZGV";
+ mangleName(D);
+}
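+
+// An illustrative example (editor's note, not in the original source):
+// the guard variable for a function-local static such as
+//
+//   void f() { static Foo x; }
+//
+// mangles as _ZGVZ1fvE1x: the "_ZGV" prefix followed by the local
+// name Z1fvE1x of x.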
+
+void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <encoding> ::= <function name> <bare-function-type>
+ mangleName(FD);
+ mangleBareFunctionType(FD->getType()->getAsFunctionType(), false);
+}
+
+static bool isStdNamespace(const DeclContext *DC) {
+ if (!DC->isNamespace() || !DC->getParent()->isTranslationUnit())
+ return false;
+
+ const NamespaceDecl *NS = cast<NamespaceDecl>(DC);
+ return NS->getOriginalNamespace()->getIdentifier()->isStr("std");
+}
+
+void CXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <nested-name>
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name> # See Scope Encoding below
+ //
+ // <unscoped-name> ::= <unqualified-name>
+ // ::= St <unqualified-name> # ::std::
+ if (ND->getDeclContext()->isTranslationUnit())
+ mangleUnqualifiedName(ND);
+ else if (isStdNamespace(ND->getDeclContext())) {
+ Out << "St";
+ mangleUnqualifiedName(ND);
+ } else if (isa<FunctionDecl>(ND->getDeclContext()))
+ mangleLocalName(ND);
+ else
+ mangleNestedName(ND);
+}
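+
+// Some illustrative manglings (an editor's sketch, not in the original
+// source): given
+//
+//   void f(int);                      // at global scope
+//   namespace std { void g(int); }
+//   namespace N { void h(int); }
+//
+// f mangles as _Z1fi (an unqualified name), g as _ZSt1gi (using the
+// St shorthand for ::std::), and h as _ZN1N1hEi (a nested name).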
+
+void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ DeclarationName Name = ND->getDeclName();
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ mangleSourceName(Name.getAsIdentifierInfo());
+ break;
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ assert(false && "Can't mangle Objective-C selector names here!");
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ constructor we're mangling, use the
+ // type we were given.
+ mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
+ else
+ // Otherwise, use the complete constructor name. This is relevant if a
+ // class with a constructor is declared within a constructor.
+ mangleCXXCtorType(Ctor_Complete);
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ destructor we're mangling, use the
+ // type we were given.
+ mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+ else
+ // Otherwise, use the complete destructor name. This is relevant if a
+ // class with a destructor is declared within a destructor.
+ mangleCXXDtorType(Dtor_Complete);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= cv <type> # (cast)
+ Out << "cv";
+ mangleType(Context.getCanonicalType(Name.getCXXNameType()));
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator(),
+ cast<FunctionDecl>(ND)->getNumParams());
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ assert(false && "Can't mangle a using directive name!");
+ break;
+ }
+}
+
+void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
+ Out << II->getLength() << II->getName();
+}
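+
+// For example (illustrative): the identifier "ostream" has length 7
+// and is emitted as "7ostream"; "x" becomes "1x".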
+
+void CXXNameMangler::mangleNestedName(const NamedDecl *ND) {
+ // <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
+ // ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+ // FIXME: no template support
+ Out << 'N';
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND))
+ mangleCVQualifiers(Method->getTypeQualifiers());
+ manglePrefix(ND->getDeclContext());
+ mangleUnqualifiedName(ND);
+ Out << 'E';
+}
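+
+// For example (illustrative): the const member function
+//
+//   int Foo::size() const;
+//
+// mangles as _ZNK3Foo4sizeEv: 'N' opens the nested name, 'K' encodes
+// the method's const qualifier, 3Foo4size is the prefix plus the
+// unqualified name, 'E' closes the name, and 'v' is the empty
+// parameter list.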
+
+void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
+ // <local-name> := Z <function encoding> E <entity name> [<discriminator>]
+ // := Z <function encoding> E s [<discriminator>]
+ // <discriminator> := _ <non-negative number>
+ Out << 'Z';
+ mangleFunctionEncoding(cast<FunctionDecl>(ND->getDeclContext()));
+ Out << 'E';
+ mangleSourceName(ND->getIdentifier());
+}
+
+void CXXNameMangler::manglePrefix(const DeclContext *DC) {
+ // <prefix> ::= <prefix> <unqualified-name>
+ // ::= <template-prefix> <template-args>
+ // ::= <template-param>
+ // ::= # empty
+ // ::= <substitution>
+ // FIXME: We only handle mangling of namespaces and classes at the moment.
+ if (!DC->getParent()->isTranslationUnit())
+ manglePrefix(DC->getParent());
+
+ if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(DC))
+ mangleSourceName(Namespace->getIdentifier());
+ else if (const RecordDecl *Record = dyn_cast<RecordDecl>(DC)) {
+ if (const ClassTemplateSpecializationDecl *D =
+ dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
+ mangleType(QualType(D->getTypeForDecl(), 0));
+ } else
+ mangleSourceName(Record->getIdentifier());
+ }
+}
+
+void
+CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
+ switch (OO) {
+ // <operator-name> ::= nw # new
+ case OO_New: Out << "nw"; break;
+ // ::= na # new[]
+ case OO_Array_New: Out << "na"; break;
+ // ::= dl # delete
+ case OO_Delete: Out << "dl"; break;
+ // ::= da # delete[]
+ case OO_Array_Delete: Out << "da"; break;
+ // ::= ps # + (unary)
+ // ::= pl # +
+ case OO_Plus: Out << (Arity == 1? "ps" : "pl"); break;
+ // ::= ng # - (unary)
+ // ::= mi # -
+ case OO_Minus: Out << (Arity == 1? "ng" : "mi"); break;
+ // ::= ad # & (unary)
+ // ::= an # &
+ case OO_Amp: Out << (Arity == 1? "ad" : "an"); break;
+ // ::= de # * (unary)
+ // ::= ml # *
+ case OO_Star: Out << (Arity == 1? "de" : "ml"); break;
+ // ::= co # ~
+ case OO_Tilde: Out << "co"; break;
+ // ::= dv # /
+ case OO_Slash: Out << "dv"; break;
+ // ::= rm # %
+ case OO_Percent: Out << "rm"; break;
+ // ::= or # |
+ case OO_Pipe: Out << "or"; break;
+ // ::= eo # ^
+ case OO_Caret: Out << "eo"; break;
+ // ::= aS # =
+ case OO_Equal: Out << "aS"; break;
+ // ::= pL # +=
+ case OO_PlusEqual: Out << "pL"; break;
+ // ::= mI # -=
+ case OO_MinusEqual: Out << "mI"; break;
+ // ::= mL # *=
+ case OO_StarEqual: Out << "mL"; break;
+ // ::= dV # /=
+ case OO_SlashEqual: Out << "dV"; break;
+ // ::= rM # %=
+ case OO_PercentEqual: Out << "rM"; break;
+ // ::= aN # &=
+ case OO_AmpEqual: Out << "aN"; break;
+ // ::= oR # |=
+ case OO_PipeEqual: Out << "oR"; break;
+ // ::= eO # ^=
+ case OO_CaretEqual: Out << "eO"; break;
+ // ::= ls # <<
+ case OO_LessLess: Out << "ls"; break;
+ // ::= rs # >>
+ case OO_GreaterGreater: Out << "rs"; break;
+ // ::= lS # <<=
+ case OO_LessLessEqual: Out << "lS"; break;
+ // ::= rS # >>=
+ case OO_GreaterGreaterEqual: Out << "rS"; break;
+ // ::= eq # ==
+ case OO_EqualEqual: Out << "eq"; break;
+ // ::= ne # !=
+ case OO_ExclaimEqual: Out << "ne"; break;
+ // ::= lt # <
+ case OO_Less: Out << "lt"; break;
+ // ::= gt # >
+ case OO_Greater: Out << "gt"; break;
+ // ::= le # <=
+ case OO_LessEqual: Out << "le"; break;
+ // ::= ge # >=
+ case OO_GreaterEqual: Out << "ge"; break;
+ // ::= nt # !
+ case OO_Exclaim: Out << "nt"; break;
+ // ::= aa # &&
+ case OO_AmpAmp: Out << "aa"; break;
+ // ::= oo # ||
+ case OO_PipePipe: Out << "oo"; break;
+ // ::= pp # ++
+ case OO_PlusPlus: Out << "pp"; break;
+ // ::= mm # --
+ case OO_MinusMinus: Out << "mm"; break;
+ // ::= cm # ,
+ case OO_Comma: Out << "cm"; break;
+ // ::= pm # ->*
+ case OO_ArrowStar: Out << "pm"; break;
+ // ::= pt # ->
+ case OO_Arrow: Out << "pt"; break;
+ // ::= cl # ()
+ case OO_Call: Out << "cl"; break;
+ // ::= ix # []
+ case OO_Subscript: Out << "ix"; break;
+ // UNSUPPORTED: ::= qu # ?
+
+ case OO_None:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Not an overloaded operator");
+ break;
+ }
+}
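+
+// For example (illustrative): the free function
+//
+//   bool operator==(const Foo &, const Foo &);
+//
+// has operator-name "eq". With substitutions a compiler emits
+// _ZeqRK3FooS1_; since this mangler does not implement substitutions
+// yet, it would produce _ZeqRK3FooRK3Foo.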
+
+void CXXNameMangler::mangleCVQualifiers(unsigned Quals) {
+ // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
+ if (Quals & QualType::Restrict)
+ Out << 'r';
+ if (Quals & QualType::Volatile)
+ Out << 'V';
+ if (Quals & QualType::Const)
+ Out << 'K';
+}
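+
+// For example (illustrative): the pointee qualifiers of
+// "const volatile int *" mangle as "VK", so the full parameter type
+// is PVKi; a restrict-qualified pointee would add a leading 'r'.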
+
+void CXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = Context.getCanonicalType(T);
+
+ // FIXME: Should we have a TypeNodes.def to make this easier? (YES!)
+
+ // <type> ::= <CV-qualifiers> <type>
+ mangleCVQualifiers(T.getCVRQualifiers());
+
+ // ::= <builtin-type>
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr()))
+ mangleType(BT);
+ // ::= <function-type>
+ else if (const FunctionType *FT = dyn_cast<FunctionType>(T.getTypePtr()))
+ mangleType(FT);
+ // ::= <class-enum-type>
+ else if (const TagType *TT = dyn_cast<TagType>(T.getTypePtr()))
+ mangleType(TT);
+ // ::= <array-type>
+ else if (const ArrayType *AT = dyn_cast<ArrayType>(T.getTypePtr()))
+ mangleType(AT);
+ // ::= <pointer-to-member-type>
+ else if (const MemberPointerType *MPT
+ = dyn_cast<MemberPointerType>(T.getTypePtr()))
+ mangleType(MPT);
+ // ::= <template-param>
+ else if (const TemplateTypeParmType *TypeParm
+ = dyn_cast<TemplateTypeParmType>(T.getTypePtr()))
+ mangleType(TypeParm);
+ // FIXME: ::= <template-template-param> <template-args>
+ // FIXME: ::= <substitution> # See Compression below
+ // ::= P <type> # pointer-to
+ else if (const PointerType *PT = dyn_cast<PointerType>(T.getTypePtr())) {
+ Out << 'P';
+ mangleType(PT->getPointeeType());
+ }
+ // ::= R <type> # reference-to
+ else if (const LValueReferenceType *RT =
+ dyn_cast<LValueReferenceType>(T.getTypePtr())) {
+ Out << 'R';
+ mangleType(RT->getPointeeType());
+ }
+ // ::= O <type> # rvalue reference-to (C++0x)
+ else if (const RValueReferenceType *RT =
+ dyn_cast<RValueReferenceType>(T.getTypePtr())) {
+ Out << 'O';
+ mangleType(RT->getPointeeType());
+ }
+ // ::= C <type> # complex pair (C 2000)
+ else if (const ComplexType *CT = dyn_cast<ComplexType>(T.getTypePtr())) {
+ Out << 'C';
+ mangleType(CT->getElementType());
+ } else if (const VectorType *VT = dyn_cast<VectorType>(T.getTypePtr())) {
+ // GNU extension: vector types
+ Out << "U8__vector";
+ mangleType(VT->getElementType());
+ } else if (const ObjCInterfaceType *IT =
+ dyn_cast<ObjCInterfaceType>(T.getTypePtr())) {
+ mangleType(IT);
+ }
+ // FIXME: ::= G <type> # imaginary (C 2000)
+ // FIXME: ::= U <source-name> <type> # vendor extended type qualifier
+ else
+ assert(false && "Cannot mangle unknown type");
+}
+
+void CXXNameMangler::mangleType(const BuiltinType *T) {
+ // <builtin-type> ::= v # void
+ // ::= w # wchar_t
+ // ::= b # bool
+ // ::= c # char
+ // ::= a # signed char
+ // ::= h # unsigned char
+ // ::= s # short
+ // ::= t # unsigned short
+ // ::= i # int
+ // ::= j # unsigned int
+ // ::= l # long
+ // ::= m # unsigned long
+ // ::= x # long long, __int64
+ // ::= y # unsigned long long, __int64
+ // ::= n # __int128
+ // UNSUPPORTED: ::= o # unsigned __int128
+ // ::= f # float
+ // ::= d # double
+ // ::= e # long double, __float80
+ // UNSUPPORTED: ::= g # __float128
+ // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
+ // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
+ // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
+ // UNSUPPORTED: ::= Dh # IEEE 754r half-precision floating point (16 bits)
+ // UNSUPPORTED: ::= Di # char32_t
+ // UNSUPPORTED: ::= Ds # char16_t
+ // ::= u <source-name> # vendor extended type
+ // From our point of view, std::nullptr_t is a builtin, but as far as mangling
+ // is concerned, it's a type called std::nullptr_t.
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'v'; break;
+ case BuiltinType::Bool: Out << 'b'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
+ case BuiltinType::UChar: Out << 'h'; break;
+ case BuiltinType::UShort: Out << 't'; break;
+ case BuiltinType::UInt: Out << 'j'; break;
+ case BuiltinType::ULong: Out << 'm'; break;
+ case BuiltinType::ULongLong: Out << 'y'; break;
+ case BuiltinType::UInt128: Out << 'o'; break;
+ case BuiltinType::SChar: Out << 'a'; break;
+ case BuiltinType::WChar: Out << 'w'; break;
+ case BuiltinType::Short: Out << 's'; break;
+ case BuiltinType::Int: Out << 'i'; break;
+ case BuiltinType::Long: Out << 'l'; break;
+ case BuiltinType::LongLong: Out << 'x'; break;
+ case BuiltinType::Int128: Out << 'n'; break;
+ case BuiltinType::Float: Out << 'f'; break;
+ case BuiltinType::Double: Out << 'd'; break;
+ case BuiltinType::LongDouble: Out << 'e'; break;
+ case BuiltinType::NullPtr: Out << "St9nullptr_t"; break;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ assert(false &&
+ "Overloaded and dependent types shouldn't get to name mangling");
+ break;
+ }
+}
+
+void CXXNameMangler::mangleType(const FunctionType *T) {
+ // <function-type> ::= F [Y] <bare-function-type> E
+ Out << 'F';
+ // FIXME: We don't have enough information in the AST to produce the 'Y'
+ // encoding for extern "C" function types.
+ mangleBareFunctionType(T, /*MangleReturnType=*/true);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType) {
+ // <bare-function-type> ::= <signature type>+
+ if (MangleReturnType)
+ mangleType(T->getResultType());
+
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(T);
+ assert(Proto && "Can't mangle K&R function prototypes");
+
+ if (Proto->getNumArgs() == 0) {
+ Out << 'v';
+ return;
+ }
+
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+
+ // <builtin-type> ::= z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'z';
+}
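+
+// For example (illustrative): an empty parameter list mangles as 'v'
+// (void f() is _Z1fv), and a variadic signature appends 'z'
+// (int trace(const char *, ...) is _Z5tracePKcz).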
+
+void CXXNameMangler::mangleType(const TagType *T) {
+ // <class-enum-type> ::= <name>
+
+ if (!T->getDecl()->getIdentifier())
+ mangleName(T->getDecl()->getTypedefForAnonDecl());
+ else
+ mangleName(T->getDecl());
+
+ // If this is a class template specialization, mangle the template
+ // arguments.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl()))
+ mangleTemplateArgumentList(Spec->getTemplateArgs());
+}
+
+void CXXNameMangler::mangleType(const ArrayType *T) {
+ // <array-type> ::= A <positive dimension number> _ <element type>
+ // ::= A [<dimension expression>] _ <element type>
+ Out << 'A';
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T))
+ Out << CAT->getSize();
+ else if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(T))
+ mangleExpression(VAT->getSizeExpr());
+ else if (const DependentSizedArrayType *DSAT
+ = dyn_cast<DependentSizedArrayType>(T))
+ mangleExpression(DSAT->getSizeExpr());
+
+ Out << '_';
+ mangleType(T->getElementType());
+}
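+
+// For example (illustrative): parameters of array type decay, so array
+// types mostly appear behind references; the parameter of
+//
+//   void f(int (&a)[10]);
+//
+// mangles as RA10_i, giving _Z1fRA10_i overall.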
+
+void CXXNameMangler::mangleType(const MemberPointerType *T) {
+ // <pointer-to-member-type> ::= M <class type> <member type>
+ Out << 'M';
+ mangleType(QualType(T->getClass(), 0));
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ mangleCVQualifiers(FPT->getTypeQuals());
+ mangleType(FPT);
+ } else
+ mangleType(PointeeType);
+}
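+
+// For example (illustrative): the data member pointer "int Foo::*"
+// mangles as M3Fooi, and the const member function pointer
+// "void (Foo::*)() const" as M3FooKFvvE.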
+
+void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ // <template-param> ::= T_ # first template parameter
+ // ::= T <parameter-2 non-negative number> _
+ if (T->getIndex() == 0)
+ Out << "T_";
+ else
+ Out << 'T' << (T->getIndex() - 1) << '_';
+}
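+
+// For example (illustrative): in
+//
+//   template <typename T, typename U> void f(T, U);
+//
+// T mangles as T_, while parameters after the first are numbered from
+// zero, so U mangles as T0_ and a third parameter would be T1_.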
+
+void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ mangleSourceName(T->getDecl()->getIdentifier());
+}
+
+void CXXNameMangler::mangleExpression(Expr *E) {
+ assert(false && "Cannot mangle expressions yet");
+}
+
+void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
+ // <ctor-dtor-name> ::= C1 # complete object constructor
+ // ::= C2 # base object constructor
+ // ::= C3 # complete object allocating constructor
+ //
+ switch (T) {
+ case Ctor_Complete:
+ Out << "C1";
+ break;
+ case Ctor_Base:
+ Out << "C2";
+ break;
+ case Ctor_CompleteAllocating:
+ Out << "C3";
+ break;
+ }
+}
+
+void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+ // <ctor-dtor-name> ::= D0 # deleting destructor
+ // ::= D1 # complete object destructor
+ // ::= D2 # base object destructor
+ //
+ switch (T) {
+ case Dtor_Deleting:
+ Out << "D0";
+ break;
+ case Dtor_Complete:
+ Out << "D1";
+ break;
+ case Dtor_Base:
+ Out << "D2";
+ break;
+ }
+}
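+
+// For example (illustrative): for a class Foo, Foo::Foo() mangles as
+// _ZN3FooC1Ev (complete object constructor) or _ZN3FooC2Ev (base
+// object constructor); ~Foo() yields _ZN3FooD1Ev and _ZN3FooD2Ev,
+// plus the deleting destructor _ZN3FooD0Ev when it is virtual.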
+
+void CXXNameMangler::mangleTemplateArgumentList(const TemplateArgumentList &L) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << "I";
+
+ for (unsigned i = 0, e = L.size(); i != e; ++i) {
+ const TemplateArgument &A = L[i];
+
+ mangleTemplateArgument(A);
+ }
+
+ Out << "E";
+}
+
+void CXXNameMangler::mangleTemplateArgument(const TemplateArgument &A) {
+ // <template-arg> ::= <type> # type or template
+ // ::= X <expression> E # expression
+ // ::= <expr-primary> # simple expressions
+ // ::= I <template-arg>* E # argument pack
+  //                ::= sp <expression> # pack expansion (C++0x)
+ switch (A.getKind()) {
+ default:
+ assert(0 && "Unknown template argument kind!");
+ case TemplateArgument::Type:
+ mangleType(A.getAsType());
+ break;
+ case TemplateArgument::Integral:
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+
+ Out << 'L';
+
+ mangleType(A.getIntegralType());
+
+ const llvm::APSInt *Integral = A.getAsIntegral();
+ if (A.getIntegralType()->isBooleanType()) {
+ // Boolean values are encoded as 0/1.
+ Out << (Integral->getBoolValue() ? '1' : '0');
+ } else {
+ if (Integral->isNegative())
+ Out << 'n';
+ Integral->abs().print(Out, false);
+ }
+
+ Out << 'E';
+ break;
+ }
+}
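+
+// For example (illustrative): the specialization Fixed<int, 5> of
+//
+//   template <typename T, int N> class Fixed;
+//
+// mangles as 5FixedIiLi5EE: the I...E template-args list wraps the
+// type argument 'i' and the integer literal Li5E. A value of -5 would
+// be written Lin5E, and a bool argument of true as Lb1E.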
+
+namespace clang {
+ /// \brief Mangles the name of the declaration D and emits that name
+ /// to the given output stream.
+ ///
+ /// If the declaration D requires a mangled name, this routine will
+ /// emit that mangled name to \p os and return true. Otherwise, \p
+ /// os will be unchanged and this routine will return false. In this
+ /// case, the caller should just emit the identifier of the declaration
+ /// (\c D->getIdentifier()) as its name.
+ bool mangleName(const NamedDecl *D, ASTContext &Context,
+ llvm::raw_ostream &os) {
+ assert(!isa<CXXConstructorDecl>(D) &&
+ "Use mangleCXXCtor for constructor decls!");
+ assert(!isa<CXXDestructorDecl>(D) &&
+ "Use mangleCXXDtor for destructor decls!");
+
+ CXXNameMangler Mangler(Context, os);
+ if (!Mangler.mangle(D))
+ return false;
+
+ os.flush();
+ return true;
+ }
+
+ /// mangleGuardVariable - Returns the mangled name for a guard variable
+ /// for the passed in VarDecl.
+ void mangleGuardVariable(const VarDecl *D, ASTContext &Context,
+ llvm::raw_ostream &os) {
+ CXXNameMangler Mangler(Context, os);
+ Mangler.mangleGuardVariable(D);
+
+ os.flush();
+ }
+
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ ASTContext &Context, llvm::raw_ostream &os) {
+ CXXNameMangler Mangler(Context, os);
+ Mangler.mangleCXXCtor(D, Type);
+
+ os.flush();
+ }
+
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ ASTContext &Context, llvm::raw_ostream &os) {
+ CXXNameMangler Mangler(Context, os);
+ Mangler.mangleCXXDtor(D, Type);
+
+ os.flush();
+ }
+
+}
+
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
new file mode 100644
index 0000000..77cbd97
--- /dev/null
+++ b/lib/CodeGen/Mangle.h
@@ -0,0 +1,44 @@
+//===--- Mangle.h - Mangle C++ Names ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_MANGLE_H
+#define LLVM_CLANG_CODEGEN_MANGLE_H
+
+#include "CGCXX.h"
+
+namespace llvm {
+ class raw_ostream;
+}
+
+namespace clang {
+ class ASTContext;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class NamedDecl;
+ class VarDecl;
+
+ bool mangleName(const NamedDecl *D, ASTContext &Context,
+ llvm::raw_ostream &os);
+ void mangleGuardVariable(const VarDecl *D, ASTContext &Context,
+ llvm::raw_ostream &os);
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ ASTContext &Context, llvm::raw_ostream &os);
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ ASTContext &Context, llvm::raw_ostream &os);
+}
+
+#endif
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 0000000..9b85df6
--- /dev/null
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,100 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This builds an AST and converts it to LLVM Code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CodeGenModule.h"
+#include "clang/Frontend/CompileOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+
+namespace {
+ class VISIBILITY_HIDDEN CodeGeneratorImpl : public CodeGenerator {
+ Diagnostic &Diags;
+ llvm::OwningPtr<const llvm::TargetData> TD;
+ ASTContext *Ctx;
+ const CompileOptions CompileOpts; // Intentionally copied in.
+ protected:
+ llvm::OwningPtr<llvm::Module> M;
+ llvm::OwningPtr<CodeGen::CodeGenModule> Builder;
+ public:
+ CodeGeneratorImpl(Diagnostic &diags, const std::string& ModuleName,
+ const CompileOptions &CO)
+ : Diags(diags), CompileOpts(CO), M(new llvm::Module(ModuleName)) {}
+
+ virtual ~CodeGeneratorImpl() {}
+
+ virtual llvm::Module* GetModule() {
+ return M.get();
+ }
+
+ virtual llvm::Module* ReleaseModule() {
+ return M.take();
+ }
+
+ virtual void Initialize(ASTContext &Context) {
+ Ctx = &Context;
+
+ M->setTargetTriple(Ctx->Target.getTargetTriple());
+ M->setDataLayout(Ctx->Target.getTargetDescription());
+ TD.reset(new llvm::TargetData(Ctx->Target.getTargetDescription()));
+ Builder.reset(new CodeGen::CodeGenModule(Context, CompileOpts,
+ *M, *TD, Diags));
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef DG) {
+ // Make sure to emit all elements of a Decl.
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ Builder->EmitTopLevelDecl(*I);
+ }
+
+ /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+ /// to (e.g. struct, union, enum, class) is completed. This allows the
+ /// client hack on the type, which can occur at any point in the file
+ /// (because these can be defined in declspecs).
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ Builder->UpdateCompletedType(D);
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ if (Diags.hasErrorOccurred()) {
+ M.reset();
+ return;
+ }
+
+ if (Builder)
+ Builder->Release();
+    }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitTentativeDefinition(D);
+ }
+ };
+}
+
+CodeGenerator *clang::CreateLLVMCodeGen(Diagnostic &Diags,
+ const std::string& ModuleName,
+ const CompileOptions &CO) {
+ return new CodeGeneratorImpl(Diags, ModuleName, CO);
+}
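+
+// Illustrative usage (an editor's sketch; the surrounding driver code
+// is hypothetical): CodeGenerator is an ASTConsumer, so a driver
+// typically creates one, hands it to the parser, and collects the
+// module afterwards:
+//
+//   llvm::OwningPtr<CodeGenerator> CG(
+//       CreateLLVMCodeGen(Diags, "my_module", CompileOpts));
+//   // ... run the parser over CG; it will call Initialize(),
+//   // HandleTopLevelDecl(), and finally HandleTranslationUnit() ...
+//   llvm::OwningPtr<llvm::Module> M(CG->ReleaseModule());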
diff --git a/lib/CodeGen/README.txt b/lib/CodeGen/README.txt
new file mode 100644
index 0000000..f60cd03
--- /dev/null
+++ b/lib/CodeGen/README.txt
@@ -0,0 +1,65 @@
+IRgen optimization opportunities.
+
+//===---------------------------------------------------------------------===//
+
+The common pattern of
+--
+short x; // or char, etc
+(x == 10)
+--
+generates a zext/sext of x, which can easily be avoided.
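+
+As an illustrative sketch (not actual compiler output; names and types
+are approximate), the comparison currently comes out roughly as
+--
+%tmp = load i16* %x.addr
+%conv = sext i16 %tmp to i32
+%cmp = icmp eq i32 %conv, 10
+--
+when it could simply be
+--
+%tmp = load i16* %x.addr
+%cmp = icmp eq i16 %tmp, 10
+--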
+
+//===---------------------------------------------------------------------===//
+
+Bitfield accesses can be shifted to simplify masking and sign
+extension. For example, if the bitfield width is 8 and it is
+appropriately aligned, then it is much shorter to just load the char
+directly.
+
+//===---------------------------------------------------------------------===//
+
+It may be worth avoiding creation of allocas for formal arguments
+for the common situation where the argument is never written to or has
+its address taken. The idea would be to begin generating code by using
+the argument directly and if its address is taken or it is stored to
+then generate the alloca and patch up the existing code.
+
+In theory, the same optimization could be a win for block local
+variables as long as the declaration dominates all statements in the
+block.
+
+NOTE: The main case we care about this for is for -O0 -g compile time
+performance, and in that scenario we will need to emit the alloca
+anyway currently to emit proper debug info. So this is blocked by
+being able to emit debug information which refers to an LLVM
+temporary, not an alloca.
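+
+A minimal illustration (hypothetical): in
+--
+int f(int a) { return a + 1; }
+--
+the argument is never written to and never has its address taken, so
+the initial store-to-alloca and subsequent reload could be skipped and
+the incoming argument value used directly.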
+
+//===---------------------------------------------------------------------===//
+
+We should try to avoid generating basic blocks which contain only
+jumps. At -O0, this penalizes us all the way from IRgen (malloc &
+instruction overhead) down through code generation and assembly time.
+
+On 176.gcc:expr.ll, it looks like over 12% of basic blocks are just
+direct branches!
+
+//===---------------------------------------------------------------------===//
+
+There are some more places where we could avoid generating unreachable code. For
+example:
+ void f0(int a) { abort(); if (a) printf("hi"); }
+still generates a call to printf. This doesn't occur much in real
+code, but it would still be nice to clean up.
+
+//===---------------------------------------------------------------------===//
+
+Deferred generation of statics incurs some additional
+overhead. Currently it is even possible to construct test cases with
+O(N^2) behavior! For at least simple cases where we can tell a global
+is used, it is probably not worth deferring it. This doesn't solve the
+O(N^2) cases, though...
+
+PR3810
+
+//===---------------------------------------------------------------------===//