Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CGBlocks.cpp            60
-rw-r--r--  lib/CodeGen/CGBlocks.h               7
-rw-r--r--  lib/CodeGen/CGCXX.cpp              671
-rw-r--r--  lib/CodeGen/CGClass.cpp            394
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp         63
-rw-r--r--  lib/CodeGen/CGDebugInfo.h           13
-rw-r--r--  lib/CodeGen/CGDecl.cpp               2
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp           36
-rw-r--r--  lib/CodeGen/CGExpr.cpp             148
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp            3
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp          331
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp       39
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp      33
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp       160
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp           32
-rw-r--r--  lib/CodeGen/CGRTTI.cpp              22
-rw-r--r--  lib/CodeGen/CGVtable.cpp           259
-rw-r--r--  lib/CodeGen/CGVtable.h             110
-rw-r--r--  lib/CodeGen/CMakeLists.txt           2
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp     33
-rw-r--r--  lib/CodeGen/CodeGenFunction.h       28
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp       72
-rw-r--r--  lib/CodeGen/CodeGenModule.h         19
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp        43
-rw-r--r--  lib/CodeGen/CodeGenTypes.h           7
-rw-r--r--  lib/CodeGen/Mangle.cpp               5
-rw-r--r--  lib/CodeGen/TargetInfo.cpp        1904
-rw-r--r--  lib/CodeGen/TargetInfo.h            50
28 files changed, 3413 insertions, 1133 deletions
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 1bece7f..1fa422f 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -24,7 +24,7 @@ using namespace clang;
using namespace CodeGen;
llvm::Constant *CodeGenFunction::
-BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
+BuildDescriptorBlockDecl(bool BlockHasCopyDispose, CharUnits Size,
const llvm::StructType* Ty,
std::vector<HelperInfo> *NoteForHelper) {
const llvm::Type *UnsignedLongTy
@@ -40,7 +40,7 @@ BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
// FIXME: What is the right way to say this doesn't fit? We should give
// a user diagnostic in that case. Better fix would be to change the
// API to size_t.
- C = llvm::ConstantInt::get(UnsignedLongTy, Size);
+ C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
Elts.push_back(C);
if (BlockHasCopyDispose) {
@@ -176,7 +176,8 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// We run this first so that we set BlockHasCopyDispose from the entire
// block literal.
// __invoke
- uint64_t subBlockSize, subBlockAlign;
+ CharUnits subBlockSize;
+ uint64_t subBlockAlign;
llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
bool subBlockHasCopyDispose = false;
llvm::Function *Fn
@@ -321,13 +322,13 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// compared to gcc by not grabbing the forwarding slot as this must
// be done during Block_copy for us, and we can postpone the work
// until then.
- uint64_t offset = BlockDecls[BDRE->getDecl()];
+ CharUnits offset = BlockDecls[BDRE->getDecl()];
llvm::Value *BlockLiteral = LoadBlockStruct();
Loc = Builder.CreateGEP(BlockLiteral,
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset),
+ offset.getQuantity()),
"block.literal");
Ty = llvm::PointerType::get(Ty, 0);
Loc = Builder.CreateBitCast(Loc, Ty);
@@ -513,12 +514,12 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
return EmitCall(FnInfo, Func, ReturnValue, Args);
}
-uint64_t CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
+CharUnits CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
const ValueDecl *VD = E->getDecl();
- uint64_t &offset = BlockDecls[VD];
+ CharUnits &offset = BlockDecls[VD];
// See if we have already allocated an offset for this variable.
- if (offset)
+ if (offset.isPositive())
return offset;
// Don't run the expensive check, unless we have to.
@@ -535,13 +536,13 @@ uint64_t CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
const ValueDecl *VD = E->getDecl();
- uint64_t offset = AllocateBlockDecl(E);
+ CharUnits offset = AllocateBlockDecl(E);
llvm::Value *BlockLiteral = LoadBlockStruct();
llvm::Value *V = Builder.CreateGEP(BlockLiteral,
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset),
+ offset.getQuantity()),
"block.literal");
if (E->isByRef()) {
const llvm::Type *PtrStructTy
@@ -594,10 +595,10 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
// Block literal size. For global blocks we just use the size of the generic
// block literal struct.
- uint64_t BlockLiteralSize =
- TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8;
+ CharUnits BlockLiteralSize = CharUnits::fromQuantity(
+ TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8);
DescriptorFields[1] =
- llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);
+ llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity());
llvm::Constant *DescriptorStruct =
llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 2, false);
@@ -615,7 +616,8 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
std::vector<llvm::Constant*> LiteralFields(FieldCount);
CodeGenFunction::BlockInfo Info(0, n);
- uint64_t subBlockSize, subBlockAlign;
+ CharUnits subBlockSize;
+ uint64_t subBlockAlign;
llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
bool subBlockHasCopyDispose = false;
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
@@ -677,7 +679,7 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
const BlockInfo& Info,
const Decl *OuterFuncDecl,
llvm::DenseMap<const Decl*, llvm::Value*> ldm,
- uint64_t &Size,
+ CharUnits &Size,
uint64_t &Align,
llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
bool &subBlockHasCopyDispose) {
@@ -698,8 +700,9 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
LocalDeclMap[VD] = i->second;
}
- BlockOffset = CGM.getTargetData()
- .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8;
+ BlockOffset = CharUnits::fromQuantity(
+ CGM.getTargetData()
+ .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8);
BlockAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
const FunctionType *BlockFunctionType = BExpr->getFunctionType();
@@ -799,7 +802,8 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
// The runtime needs a minimum alignment of a void *.
uint64_t MinAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
- BlockOffset = llvm::RoundUpToAlignment(BlockOffset, MinAlign);
+ BlockOffset = CharUnits::fromQuantity(
+ llvm::RoundUpToAlignment(BlockOffset.getQuantity(), MinAlign));
Size = BlockOffset;
Align = BlockAlign;
@@ -808,30 +812,32 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
return Fn;
}
-uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
+CharUnits BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());
- uint64_t Size = getContext().getTypeSize(D->getType()) / 8;
+ CharUnits Size = getContext().getTypeSizeInChars(D->getType());
uint64_t Align = getContext().getDeclAlignInBytes(D);
if (BDRE->isByRef()) {
- Size = getContext().getTypeSize(getContext().VoidPtrTy) / 8;
+ Size = getContext().getTypeSizeInChars(getContext().VoidPtrTy);
Align = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
}
assert ((Align > 0) && "alignment must be 1 byte or more");
- uint64_t OldOffset = BlockOffset;
+ CharUnits OldOffset = BlockOffset;
// Ensure proper alignment, even if it means we have to have a gap
- BlockOffset = llvm::RoundUpToAlignment(BlockOffset, Align);
+ BlockOffset = CharUnits::fromQuantity(
+ llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align));
BlockAlign = std::max(Align, BlockAlign);
- uint64_t Pad = BlockOffset - OldOffset;
- if (Pad) {
- llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad);
+ CharUnits Pad = BlockOffset - OldOffset;
+ if (Pad.isPositive()) {
+ llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad.getQuantity());
QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
- llvm::APInt(32, Pad),
+ llvm::APInt(32,
+ Pad.getQuantity()),
ArrayType::Normal, 0);
ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
0, QualType(PadTy), 0, VarDecl::None);
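The change in this file is mechanical but worth naming: block-layout byte offsets move from raw uint64_t into CharUnits, and getQuantity() unwraps the value only at the points where an llvm::ConstantInt is actually built. A minimal sketch of what the wrapper buys, assuming a toy class shaped like clang's real CharUnits (clang/AST/CharUnits.h):

    #include <cstdint>

    // Toy CharUnits: a byte count that cannot be silently mixed with a
    // bit count or a bare integer.
    class CharUnits {
      int64_t Quantity;
      explicit CharUnits(int64_t Q) : Quantity(Q) {}
    public:
      CharUnits() : Quantity(0) {}
      static CharUnits fromQuantity(int64_t Q) { return CharUnits(Q); }
      int64_t getQuantity() const { return Quantity; } // unwrap at LLVM boundaries
      bool isPositive() const { return Quantity > 0; }
      CharUnits operator-(CharUnits RHS) const {
        return CharUnits(Quantity - RHS.Quantity);
      }
    };

    // CharUnits Off = SomeBitCount;   // does not compile: the conversion
    // must be spelled fromQuantity(), so bit/byte mixups surface at the
    // call site instead of at run time.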
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 38e02a7..f42244c 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -175,13 +176,13 @@ public:
/// BlockOffset - The offset in bytes for the next allocation of an
/// imported block variable.
- uint64_t BlockOffset;
+ CharUnits BlockOffset;
/// BlockAlign - Maximal alignment needed for the Block expressed in bytes.
uint64_t BlockAlign;
/// getBlockOffset - Allocate an offset for the ValueDecl from a
/// BlockDeclRefExpr in a block literal (BlockExpr).
- uint64_t getBlockOffset(const BlockDeclRefExpr *E);
+ CharUnits getBlockOffset(const BlockDeclRefExpr *E);
/// BlockHasCopyDispose - True iff the block uses copy/dispose.
bool BlockHasCopyDispose;
@@ -191,7 +192,7 @@ public:
llvm::SmallVector<const Expr *, 8> BlockDeclRefDecls;
/// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
- std::map<const Decl*, uint64_t> BlockDecls;
+ std::map<const Decl*, CharUnits> BlockDecls;
ImplicitParamDecl *BlockStructDecl;
ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
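One subtlety behind the new offset.isPositive() test in AllocateBlockDecl: BlockDecls is a std::map, and operator[] value-initializes a missing entry, so a fresh CharUnits starts at zero; zero is never a legal variable offset because the block header occupies the front of the literal. A small sketch of the allocate-once idiom, reusing the toy CharUnits above:

    #include <map>

    struct CharUnits {                      // toy stand-in, as sketched above
      long long Q = 0;
      bool isPositive() const { return Q > 0; }
    };

    std::map<const void *, CharUnits> BlockDecls;

    CharUnits allocateOffset(const void *D, CharUnits Next) {
      CharUnits &Off = BlockDecls[D];  // value-initialized on first lookup
      if (Off.isPositive())            // already placed: reuse the slot
        return Off;
      Off = Next;                      // first touch: record the offset in place
      return Off;
    }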
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index cc006d9..4323f84 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -26,269 +26,7 @@
using namespace clang;
using namespace CodeGen;
-RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
- llvm::Value *Callee,
- ReturnValueSlot ReturnValue,
- llvm::Value *This,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
- assert(MD->isInstance() &&
- "Trying to emit a member call expr on a static method!");
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
-
- CallArgList Args;
-
- // Push the this ptr.
- Args.push_back(std::make_pair(RValue::get(This),
- MD->getThisType(getContext())));
-
- // And the rest of the call args
- EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
-
- QualType ResultType = MD->getType()->getAs<FunctionType>()->getResultType();
- return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), Callee,
- ReturnValue, Args, MD);
-}
-
-/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
-/// expr can be devirtualized.
-static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- // This is a record decl. We know the type and can devirtualize it.
- return VD->getType()->isRecordType();
- }
-
- return false;
- }
-
- // We can always devirtualize calls on temporary object expressions.
- if (isa<CXXTemporaryObjectExpr>(Base))
- return true;
-
- // And calls on bound temporaries.
- if (isa<CXXBindTemporaryExpr>(Base))
- return true;
-
- // Check if this is a call expr that returns a record type.
- if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
- return CE->getCallReturnType()->isRecordType();
-
- // We can't devirtualize the call.
- return false;
-}
-
-RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
- ReturnValueSlot ReturnValue) {
- if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
- return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
-
- const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
-
- if (MD->isStatic()) {
- // The method is static, emit it as we would a regular call.
- llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
- return EmitCall(getContext().getPointerType(MD->getType()), Callee,
- ReturnValue, CE->arg_begin(), CE->arg_end());
- }
-
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
-
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Value *This;
-
- if (ME->isArrow())
- This = EmitScalarExpr(ME->getBase());
- else {
- LValue BaseLV = EmitLValue(ME->getBase());
- This = BaseLV.getAddress();
- }
-
- if (MD->isCopyAssignment() && MD->isTrivial()) {
- // We don't like to generate the trivial copy assignment operator when
- // it isn't necessary; just produce the proper effect here.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateCopy(This, RHS, CE->getType());
- return RValue::get(This);
- }
-
- // C++ [class.virtual]p12:
- // Explicit qualification with the scope operator (5.1) suppresses the
- // virtual call mechanism.
- //
- // We also don't emit a virtual call if the base expression has a record type
- // because then we know what the type is.
- llvm::Value *Callee;
- if (const CXXDestructorDecl *Destructor
- = dyn_cast<CXXDestructorDecl>(MD)) {
- if (Destructor->isTrivial())
- return RValue::get(0);
- if (MD->isVirtual() && !ME->hasQualifier() &&
- !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
- Callee = BuildVirtualCall(Destructor, Dtor_Complete, This, Ty);
- } else {
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty);
- }
- } else if (MD->isVirtual() && !ME->hasQualifier() &&
- !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
- Callee = BuildVirtualCall(MD, This, Ty);
- } else {
- Callee = CGM.GetAddrOfFunction(MD, Ty);
- }
-
- return EmitCXXMemberCall(MD, Callee, ReturnValue, This,
- CE->arg_begin(), CE->arg_end());
-}
-
-RValue
-CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
- ReturnValueSlot ReturnValue) {
- const BinaryOperator *BO =
- cast<BinaryOperator>(E->getCallee()->IgnoreParens());
- const Expr *BaseExpr = BO->getLHS();
- const Expr *MemFnExpr = BO->getRHS();
-
- const MemberPointerType *MPT =
- MemFnExpr->getType()->getAs<MemberPointerType>();
- const FunctionProtoType *FPT =
- MPT->getPointeeType()->getAs<FunctionProtoType>();
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
-
- const llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
- FPT->isVariadic());
-
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8Ty(VMContext)->getPointerTo();
-
- // Get the member function pointer.
- llvm::Value *MemFnPtr =
- CreateTempAlloca(ConvertType(MemFnExpr->getType()), "mem.fn");
- EmitAggExpr(MemFnExpr, MemFnPtr, /*VolatileDest=*/false);
-
- // Emit the 'this' pointer.
- llvm::Value *This;
-
- if (BO->getOpcode() == BinaryOperator::PtrMemI)
- This = EmitScalarExpr(BaseExpr);
- else
- This = EmitLValue(BaseExpr).getAddress();
-
- // Adjust it.
- llvm::Value *Adj = Builder.CreateStructGEP(MemFnPtr, 1);
- Adj = Builder.CreateLoad(Adj, "mem.fn.adj");
-
- llvm::Value *Ptr = Builder.CreateBitCast(This, Int8PtrTy, "ptr");
- Ptr = Builder.CreateGEP(Ptr, Adj, "adj");
-
- This = Builder.CreateBitCast(Ptr, This->getType(), "this");
-
- llvm::Value *FnPtr = Builder.CreateStructGEP(MemFnPtr, 0, "mem.fn.ptr");
-
- const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
-
- llvm::Value *FnAsInt = Builder.CreateLoad(FnPtr, "fn");
-
- // If the LSB in the function pointer is 1, the function pointer points to
- // a virtual function.
- llvm::Value *IsVirtual
- = Builder.CreateAnd(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1),
- "and");
-
- IsVirtual = Builder.CreateTrunc(IsVirtual,
- llvm::Type::getInt1Ty(VMContext));
-
- llvm::BasicBlock *FnVirtual = createBasicBlock("fn.virtual");
- llvm::BasicBlock *FnNonVirtual = createBasicBlock("fn.nonvirtual");
- llvm::BasicBlock *FnEnd = createBasicBlock("fn.end");
-
- Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
- EmitBlock(FnVirtual);
-
- const llvm::Type *VTableTy =
- FTy->getPointerTo()->getPointerTo()->getPointerTo();
-
- llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy);
- VTable = Builder.CreateLoad(VTable);
-
- VTable = Builder.CreateGEP(VTable, FnAsInt, "fn");
-
- // Since the function pointer is 1 plus the virtual table offset, we
- // subtract 1 by using a GEP.
- VTable = Builder.CreateConstGEP1_64(VTable, (uint64_t)-1);
-
- llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "virtualfn");
-
- EmitBranch(FnEnd);
- EmitBlock(FnNonVirtual);
-
- // If the function is not virtual, just load the pointer.
- llvm::Value *NonVirtualFn = Builder.CreateLoad(FnPtr, "fn");
- NonVirtualFn = Builder.CreateIntToPtr(NonVirtualFn, FTy->getPointerTo());
-
- EmitBlock(FnEnd);
-
- llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
- Callee->reserveOperandSpace(2);
- Callee->addIncoming(VirtualFn, FnVirtual);
- Callee->addIncoming(NonVirtualFn, FnNonVirtual);
-
- CallArgList Args;
-
- QualType ThisType =
- getContext().getPointerType(getContext().getTagDeclType(RD));
-
- // Push the this ptr.
- Args.push_back(std::make_pair(RValue::get(This), ThisType));
-
- // And the rest of the call args
- EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- QualType ResultType = BO->getType()->getAs<FunctionType>()->getResultType();
- return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), Callee,
- ReturnValue, Args);
-}
-
-RValue
-CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
- const CXXMethodDecl *MD,
- ReturnValueSlot ReturnValue) {
- assert(MD->isInstance() &&
- "Trying to emit a member call expr on a static method!");
-
- if (MD->isCopyAssignment()) {
- const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
- if (ClassDecl->hasTrivialCopyAssignment()) {
- assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
- "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
- llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
- llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
- QualType Ty = E->getType();
- EmitAggregateCopy(This, Src, Ty);
- return RValue::get(This);
- }
- }
-
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
-
- llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
-
- llvm::Value *Callee;
- if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
- Callee = BuildVirtualCall(MD, This, Ty);
- else
- Callee = CGM.GetAddrOfFunction(MD, Ty);
-
- return EmitCXXMemberCall(MD, Callee, ReturnValue, This,
- E->arg_begin() + 1, E->arg_end());
-}
llvm::Value *CodeGenFunction::LoadCXXThis() {
assert(isa<CXXMethodDecl>(CurFuncDecl) &&
@@ -302,320 +40,6 @@ llvm::Value *CodeGenFunction::LoadCXXThis() {
return Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
}
-/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
-/// for-loop to call the default constructor on individual members of the
-/// array.
-/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
-/// array type and 'ArrayPtr' points to the beginning of the array.
-/// It is assumed that all relevant checks have been made by the caller.
-void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
- const ConstantArrayType *ArrayTy,
- llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
-
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
- llvm::Value * NumElements =
- llvm::ConstantInt::get(SizeTy,
- getContext().getConstantArrayElementCount(ArrayTy));
-
- EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd);
-}
-
-void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
- llvm::Value *NumElements,
- llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
-
- // Create a temporary for the loop index and initialize it with 0.
- llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
- llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
- Builder.CreateStore(Zero, IndexPtr);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
- // Generate: if (loop-index < number-of-elements) fall to the loop body,
- // otherwise, go to the block after the for-loop.
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
- EmitBlock(ForBody);
-
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
- // Inside the loop body, emit the constructor call on the array element.
- Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter,
- "arrayidx");
-
- // C++ [class.temporary]p4:
- // There are two contexts in which temporaries are destroyed at a different
- // point than the end of the full-expression. The first context is when a
- // default constructor is called to initialize an element of an array.
- // If the constructor has one or more default arguments, the destruction of
- // every temporary created in a default argument expression is sequenced
- // before the construction of the next array element, if any.
-
- // Keep track of the current number of live temporaries.
- unsigned OldNumLiveTemporaries = LiveTemporaries.size();
-
- EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd);
-
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
-
- EmitBlock(ContinueBlock);
-
- // Emit the increment of the loop counter.
- llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
- Counter = Builder.CreateLoad(IndexPtr);
- NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
- Builder.CreateStore(NextVal, IndexPtr);
-
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
-}
-
-/// EmitCXXAggrDestructorCall - calls the default destructor on array
-/// elements in reverse order of construction.
-void
-CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This) {
- const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
- assert(CA && "Do we support VLA for destruction ?");
- uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
-
- const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
- llvm::Value* ElementCountPtr = llvm::ConstantInt::get(SizeLTy, ElementCount);
- EmitCXXAggrDestructorCall(D, ElementCountPtr, This);
-}
-
-/// EmitCXXAggrDestructorCall - calls the default destructor on array
-/// elements in reverse order of construction.
-void
-CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- llvm::Value *UpperCount,
- llvm::Value *This) {
- const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
- llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);
-
- // Create a temporary for the loop index and initialize it with count of
- // array elements.
- llvm::Value *IndexPtr = CreateTempAlloca(SizeLTy, "loop.index");
-
- // Store the number of elements in the index pointer.
- Builder.CreateStore(UpperCount, IndexPtr);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
- // Generate: if (loop-index != 0) fall to the loop body,
- // otherwise, go to the block after the for-loop.
- llvm::Value* zeroConstant =
- llvm::Constant::getNullValue(SizeLTy);
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
- "isne");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsNE, ForBody, AfterFor);
-
- EmitBlock(ForBody);
-
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
- // Inside the loop body, emit the destructor call on the array element.
- Counter = Builder.CreateLoad(IndexPtr);
- Counter = Builder.CreateSub(Counter, One);
- llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
- EmitCXXDestructorCall(D, Dtor_Complete, Address);
-
- EmitBlock(ContinueBlock);
-
- // Emit the decrement of the loop counter.
- Counter = Builder.CreateLoad(IndexPtr);
- Counter = Builder.CreateSub(Counter, One, "dec");
- Builder.CreateStore(Counter, IndexPtr);
-
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
-}
-
-/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
-/// invoked, calls the default destructor on array elements in reverse order of
-/// construction.
-llvm::Constant *
-CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This) {
- FunctionArgList Args;
- ImplicitParamDecl *Dst =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Dst, Dst->getType()));
-
- llvm::SmallString<16> Name;
- llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount);
- QualType R = getContext().VoidTy;
- const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(R, Args);
- const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
- llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- Name.str(),
- &CGM.getModule());
- IdentifierInfo *II = &CGM.getContext().Idents.get(Name.str());
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
- FunctionDecl::Static,
- false, true);
- StartFunction(FD, R, Fn, Args, SourceLocation());
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
- FinishFunction();
- llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
- 0);
- llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
- return m;
-}
-
-void
-CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
- CXXCtorType Type,
- llvm::Value *This,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
- if (D->isCopyConstructor()) {
- const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(D->getDeclContext());
- if (ClassDecl->hasTrivialCopyConstructor()) {
- assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
- "EmitCXXConstructorCall - user declared copy constructor");
- const Expr *E = (*ArgBeg);
- QualType Ty = E->getType();
- llvm::Value *Src = EmitLValue(E).getAddress();
- EmitAggregateCopy(This, Src, Ty);
- return;
- }
- } else if (D->isTrivial()) {
- // FIXME: Track down why we're trying to generate calls to the trivial
- // default constructor!
- return;
- }
-
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
-
- EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, ArgBeg, ArgEnd);
-}
-
-void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
- CXXDtorType Type,
- llvm::Value *This) {
- llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
-
- CallArgList Args;
-
- // Push the this ptr.
- Args.push_back(std::make_pair(RValue::get(This),
- DD->getThisType(getContext())));
-
- // Add a VTT parameter if necessary.
- // FIXME: This should not be a dummy null parameter!
- if (Type == Dtor_Base && DD->getParent()->getNumVBases() != 0) {
- QualType T = getContext().getPointerType(getContext().VoidPtrTy);
-
- Args.push_back(std::make_pair(RValue::get(CGM.EmitNullConstant(T)), T));
- }
-
- // FIXME: We should try to share this code with EmitCXXMemberCall.
-
- QualType ResultType = DD->getType()->getAs<FunctionType>()->getResultType();
- EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), Callee,
- ReturnValueSlot(), Args, DD);
-}
-
-void
-CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
- const CXXConstructExpr *E) {
- assert(Dest && "Must have a destination!");
- const CXXConstructorDecl *CD = E->getConstructor();
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(E->getType());
- // For a copy constructor, even if it is trivial, must fall thru so
- // its argument is code-gen'ed.
- if (!CD->isCopyConstructor()) {
- QualType InitType = E->getType();
- if (Array)
- InitType = getContext().getBaseElementType(Array);
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(InitType->getAs<RecordType>()->getDecl());
- if (RD->hasTrivialConstructor())
- return;
- }
- // Code gen optimization to eliminate copy constructor and return
- // its first argument instead.
- if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
- const Expr *Arg = E->getArg(0);
-
- if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
- assert((ICE->getCastKind() == CastExpr::CK_NoOp ||
- ICE->getCastKind() == CastExpr::CK_ConstructorConversion ||
- ICE->getCastKind() == CastExpr::CK_UserDefinedConversion) &&
- "Unknown implicit cast kind in constructor elision");
- Arg = ICE->getSubExpr();
- }
-
- if (const CXXFunctionalCastExpr *FCE = dyn_cast<CXXFunctionalCastExpr>(Arg))
- Arg = FCE->getSubExpr();
-
- if (const CXXBindTemporaryExpr *BindExpr =
- dyn_cast<CXXBindTemporaryExpr>(Arg))
- Arg = BindExpr->getSubExpr();
-
- EmitAggExpr(Arg, Dest, false);
- return;
- }
- if (Array) {
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Dest, BasePtr);
-
- EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
- E->arg_begin(), E->arg_end());
- }
- else
- // Call the constructor.
- EmitCXXConstructorCall(CD, Ctor_Complete, Dest,
- E->arg_begin(), E->arg_end());
-}
-
void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
EmitGlobal(GlobalDecl(D, Ctor_Complete));
EmitGlobal(GlobalDecl(D, Ctor_Base));
@@ -1001,33 +425,6 @@ CodeGenModule::BuildCovariantThunk(const GlobalDecl &GD, bool Extern,
return m;
}
-llvm::Value *
-CodeGenFunction::GetVirtualCXXBaseClassOffset(llvm::Value *This,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8Ty(VMContext)->getPointerTo();
-
- llvm::Value *VTablePtr = Builder.CreateBitCast(This,
- Int8PtrTy->getPointerTo());
- VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
-
- int64_t VBaseOffsetIndex =
- CGM.getVtableInfo().getVirtualBaseOffsetIndex(ClassDecl, BaseClassDecl);
-
- llvm::Value *VBaseOffsetPtr =
- Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetIndex, "vbase.offset.ptr");
- const llvm::Type *PtrDiffTy =
- ConvertType(getContext().getPointerDiffType());
-
- VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
- PtrDiffTy->getPointerTo());
-
- llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
-
- return VBaseOffset;
-}
-
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VtableIndex,
llvm::Value *This, const llvm::Type *Ty) {
Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo();
@@ -1058,71 +455,3 @@ CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
return ::BuildVirtualCall(*this, VtableIndex, This, Ty);
}
-
-void CodeGenFunction::InitializeVtablePtrs(const CXXRecordDecl *ClassDecl) {
- if (!ClassDecl->isDynamicClass())
- return;
-
- llvm::Constant *Vtable = CGM.getVtableInfo().getVtable(ClassDecl);
- CodeGenModule::AddrSubMap_t& AddressPoints =
- *(*CGM.AddressPoints[ClassDecl])[ClassDecl];
- llvm::Value *ThisPtr = LoadCXXThis();
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
-
- // Store address points for virtual bases
- for (CXXRecordDecl::base_class_const_iterator I =
- ClassDecl->vbases_begin(), E = ClassDecl->vbases_end(); I != E; ++I) {
- const CXXBaseSpecifier &Base = *I;
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
- uint64_t Offset = Layout.getVBaseClassOffset(BaseClassDecl);
- InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints,
- ThisPtr, Offset);
- }
-
- // Store address points for non-virtual bases and current class
- InitializeVtablePtrsRecursive(ClassDecl, Vtable, AddressPoints, ThisPtr, 0);
-}
-
-void CodeGenFunction::InitializeVtablePtrsRecursive(
- const CXXRecordDecl *ClassDecl,
- llvm::Constant *Vtable,
- CodeGenModule::AddrSubMap_t& AddressPoints,
- llvm::Value *ThisPtr,
- uint64_t Offset) {
- if (!ClassDecl->isDynamicClass())
- return;
-
- // Store address points for non-virtual bases
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
- for (CXXRecordDecl::base_class_const_iterator I =
- ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
- const CXXBaseSpecifier &Base = *I;
- if (Base.isVirtual())
- continue;
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
- uint64_t NewOffset = Offset + Layout.getBaseClassOffset(BaseClassDecl);
- InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints,
- ThisPtr, NewOffset);
- }
-
- // Compute the address point
- assert(AddressPoints.count(std::make_pair(ClassDecl, Offset)) &&
- "Missing address point for class");
- uint64_t AddressPoint = AddressPoints[std::make_pair(ClassDecl, Offset)];
- llvm::Value *VtableAddressPoint =
- Builder.CreateConstInBoundsGEP2_64(Vtable, 0, AddressPoint);
-
- // Compute the address to store the address point
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- llvm::Value *VtableField = Builder.CreateBitCast(ThisPtr, Int8PtrTy);
- VtableField = Builder.CreateConstInBoundsGEP1_64(VtableField, Offset/8);
- const llvm::Type *AddressPointPtrTy =
- VtableAddressPoint->getType()->getPointerTo();
- VtableField = Builder.CreateBitCast(VtableField, AddressPointPtrTy);
-
- // Store address point
- Builder.CreateStore(VtableAddressPoint, VtableField);
-}
-
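Most of what is removed from this file is moved rather than deleted: the aggregate constructor/destructor helpers and the vtable-pointer initialization reappear in CGClass.cpp below, and the member-call emitters presumably land in the new CGExprCXX.cpp listed in the diffstat. The deleted EmitCXXMemberPointerCallExpr lowers the Itanium C++ ABI member-function-pointer call; a hedged sketch of the runtime rule it encodes (representations are target-dependent, so treat this as illustrative only):

    #include <cstdint>

    // Itanium ABI: a pointer to member function is a pair {ptr, adj}.
    // If ptr is odd, ptr - 1 is a byte offset into the vtable; otherwise
    // ptr is the function address.  adj is added to 'this' first.
    struct MemFnPtr {
      intptr_t ptr;
      intptr_t adj;
    };

    typedef void (*Thunk)(void *self);

    void callMemberFunction(void *obj, MemFnPtr mfp) {
      char *self = static_cast<char *>(obj) + mfp.adj;  // this-adjustment
      Thunk fn;
      if (mfp.ptr & 1) {
        // Virtual: load the vptr, then the entry at byte offset ptr - 1.
        char *vtable = *reinterpret_cast<char **>(self);
        fn = *reinterpret_cast<Thunk *>(vtable + (mfp.ptr - 1));
      } else {
        fn = reinterpret_cast<Thunk>(mfp.ptr);          // non-virtual: direct
      }
      fn(self);
    }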
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 953b8c8..ab3fece 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -431,6 +431,37 @@ void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
EmitBlock(AfterFor, true);
}
+/// GetVTTParameter - Return the VTT parameter that should be passed to a
+/// base constructor/destructor with virtual bases.
+static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) {
+ if (!CGVtableInfo::needsVTTParameter(GD)) {
+ // This constructor/destructor does not need a VTT parameter.
+ return 0;
+ }
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(CGF.CurFuncDecl)->getParent();
+ const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ llvm::Value *VTT;
+
+ uint64_t SubVTTIndex =
+ CGF.CGM.getVtableInfo().getSubVTTIndex(RD, Base);
+ assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+
+ if (CGVtableInfo::needsVTTParameter(CGF.CurGD)) {
+ // A VTT parameter was passed to the constructor, use it.
+ VTT = CGF.LoadCXXVTT();
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
+ } else {
+ // We're the complete constructor, so get the VTT by name.
+ VTT = CGF.CGM.getVtableInfo().getVTT(RD);
+ VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
+ }
+
+ return VTT;
+}
+
+
/// EmitClassMemberwiseCopy - This routine generates code to copy a class
/// object from SrcValue to DestValue. Copying can be either a bitwise copy
/// or via a copy constructor call.
@@ -438,11 +469,16 @@ void CodeGenFunction::EmitClassMemberwiseCopy(
llvm::Value *Dest, llvm::Value *Src,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl, QualType Ty) {
+ CXXCtorType CtorType = Ctor_Complete;
+
if (ClassDecl) {
Dest = GetAddressOfBaseClass(Dest, ClassDecl, BaseClassDecl,
/*NullCheckValue=*/false);
Src = GetAddressOfBaseClass(Src, ClassDecl, BaseClassDecl,
/*NullCheckValue=*/false);
+
+ // We want to call the base constructor.
+ CtorType = Ctor_Base;
}
if (BaseClassDecl->hasTrivialCopyConstructor()) {
EmitAggregateCopy(Dest, Src, Ty);
@@ -451,13 +487,19 @@ void CodeGenFunction::EmitClassMemberwiseCopy(
if (CXXConstructorDecl *BaseCopyCtor =
BaseClassDecl->getCopyConstructor(getContext(), 0)) {
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
- Ctor_Complete);
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor, CtorType);
CallArgList CallArgs;
// Push the this (Dest) ptr.
CallArgs.push_back(std::make_pair(RValue::get(Dest),
BaseCopyCtor->getThisType(getContext())));
+ // Push the VTT parameter, if necessary.
+ if (llvm::Value *VTT =
+ GetVTTParameter(*this, GlobalDecl(BaseCopyCtor, CtorType))) {
+ QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+ CallArgs.push_back(std::make_pair(RValue::get(VTT), T));
+ }
+
// Push the Src ptr.
CallArgs.push_back(std::make_pair(RValue::get(Src),
BaseCopyCtor->getParamDecl(0)->getType()));
@@ -787,10 +829,8 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
V = CGF.Builder.CreateConstInBoundsGEP1_64(V, Offset/8);
V = CGF.Builder.CreateBitCast(V, BaseClassType->getPointerTo());
- // FIXME: This should always use Ctor_Base as the ctor type! (But that
- // causes crashes in tests.)
CGF.EmitCXXConstructorCall(BaseInit->getConstructor(),
- CtorType, V,
+ Ctor_Base, V,
BaseInit->const_arg_begin(),
BaseInit->const_arg_end());
}
@@ -1044,3 +1084,347 @@ void CodeGenFunction::SynthesizeDefaultDestructor(const CXXDestructorDecl *Dtor,
EmitDtorEpilogue(Dtor, DtorType);
FinishFunction();
}
+
+/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
+/// for-loop to call the default constructor on individual members of the
+/// array.
+/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
+/// array type and 'ArrayPtr' points to the beginning of the array.
+/// It is assumed that all relevant checks have been made by the caller.
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value * NumElements =
+ llvm::ConstantInt::get(SizeTy,
+ getContext().getConstantArrayElementCount(ArrayTy));
+
+ EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd);
+}
+
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the constructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter,
+ "arrayidx");
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
+
+ // Keep track of the current number of live temporaries.
+ unsigned OldNumLiveTemporaries = LiveTemporaries.size();
+
+ EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd);
+
+ // Pop temporaries.
+ while (LiveTemporaries.size() > OldNumLiveTemporaries)
+ PopCXXTemporary();
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
+
+/// EmitCXXAggrDestructorCall - calls the default destructor on array
+/// elements in reverse order of construction.
+void
+CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+ assert(CA && "Do we support VLA for destruction ?");
+ uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
+
+ const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
+ llvm::Value* ElementCountPtr = llvm::ConstantInt::get(SizeLTy, ElementCount);
+ EmitCXXAggrDestructorCall(D, ElementCountPtr, This);
+}
+
+/// EmitCXXAggrDestructorCall - calls the default destructor on array
+/// elements in reverse order of construction.
+void
+CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ llvm::Value *UpperCount,
+ llvm::Value *This) {
+ const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
+ llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);
+
+ // Create a temporary for the loop index and initialize it with count of
+ // array elements.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeLTy, "loop.index");
+
+ // Store the number of elements in the index pointer.
+ Builder.CreateStore(UpperCount, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // Generate: if (loop-index != 0) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value* zeroConstant =
+ llvm::Constant::getNullValue(SizeLTy);
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
+ "isne");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsNE, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the destructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Counter = Builder.CreateSub(Counter, One);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
+ EmitCXXDestructorCall(D, Dtor_Complete, Address);
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the decrement of the loop counter.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Counter = Builder.CreateSub(Counter, One, "dec");
+ Builder.CreateStore(Counter, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
+
+/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
+/// invoked, calls the default destructor on array elements in reverse order of
+/// construction.
+llvm::Constant *
+CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ FunctionArgList Args;
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ llvm::SmallString<16> Name;
+ llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount);
+ QualType R = getContext().VoidTy;
+ const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(R, Args);
+ const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ Name.str(),
+ &CGM.getModule());
+ IdentifierInfo *II = &CGM.getContext().Idents.get(Name.str());
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ FunctionDecl::Static,
+ false, true);
+ StartFunction(FD, R, Fn, Args, SourceLocation());
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+ FinishFunction();
+ llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
+ 0);
+ llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
+ return m;
+}
+
+
+void
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ if (D->isCopyConstructor()) {
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(D->getDeclContext());
+ if (ClassDecl->hasTrivialCopyConstructor()) {
+ assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
+ "EmitCXXConstructorCall - user declared copy constructor");
+ const Expr *E = (*ArgBeg);
+ QualType Ty = E->getType();
+ llvm::Value *Src = EmitLValue(E).getAddress();
+ EmitAggregateCopy(This, Src, Ty);
+ return;
+ }
+ } else if (D->isTrivial()) {
+ // FIXME: Track down why we're trying to generate calls to the trivial
+ // default constructor!
+ return;
+ }
+
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type));
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
+}
+
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ llvm::Value *This) {
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type));
+ llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+
+ EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
+}
+
+llvm::Value *
+CodeGenFunction::GetVirtualCXXBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ llvm::Value *VTablePtr = Builder.CreateBitCast(This,
+ Int8PtrTy->getPointerTo());
+ VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
+
+ int64_t VBaseOffsetIndex =
+ CGM.getVtableInfo().getVirtualBaseOffsetIndex(ClassDecl, BaseClassDecl);
+
+ llvm::Value *VBaseOffsetPtr =
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetIndex, "vbase.offset.ptr");
+ const llvm::Type *PtrDiffTy =
+ ConvertType(getContext().getPointerDiffType());
+
+ VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
+ PtrDiffTy->getPointerTo());
+
+ llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+
+ return VBaseOffset;
+}
+
+void CodeGenFunction::InitializeVtablePtrs(const CXXRecordDecl *ClassDecl) {
+ if (!ClassDecl->isDynamicClass())
+ return;
+
+ llvm::Constant *Vtable = CGM.getVtableInfo().getVtable(ClassDecl);
+ CGVtableInfo::AddrSubMap_t& AddressPoints =
+ *(*CGM.getVtableInfo().AddressPoints[ClassDecl])[ClassDecl];
+ llvm::Value *ThisPtr = LoadCXXThis();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
+
+ // Store address points for virtual bases
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->vbases_begin(), E = ClassDecl->vbases_end(); I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+ uint64_t Offset = Layout.getVBaseClassOffset(BaseClassDecl);
+ InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints,
+ ThisPtr, Offset);
+ }
+
+ // Store address points for non-virtual bases and current class
+ InitializeVtablePtrsRecursive(ClassDecl, Vtable, AddressPoints, ThisPtr, 0);
+}
+
+void CodeGenFunction::InitializeVtablePtrsRecursive(
+ const CXXRecordDecl *ClassDecl,
+ llvm::Constant *Vtable,
+ CGVtableInfo::AddrSubMap_t& AddressPoints,
+ llvm::Value *ThisPtr,
+ uint64_t Offset) {
+ if (!ClassDecl->isDynamicClass())
+ return;
+
+ // Store address points for non-virtual bases
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+ if (Base.isVirtual())
+ continue;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+ uint64_t NewOffset = Offset + Layout.getBaseClassOffset(BaseClassDecl);
+ InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints,
+ ThisPtr, NewOffset);
+ }
+
+ // Compute the address point
+ assert(AddressPoints.count(std::make_pair(ClassDecl, Offset)) &&
+ "Missing address point for class");
+ uint64_t AddressPoint = AddressPoints[std::make_pair(ClassDecl, Offset)];
+ llvm::Value *VtableAddressPoint =
+ Builder.CreateConstInBoundsGEP2_64(Vtable, 0, AddressPoint);
+
+ // Compute the address to store the address point
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Value *VtableField = Builder.CreateBitCast(ThisPtr, Int8PtrTy);
+ VtableField = Builder.CreateConstInBoundsGEP1_64(VtableField, Offset/8);
+ const llvm::Type *AddressPointPtrTy =
+ VtableAddressPoint->getType()->getPointerTo();
+ VtableField = Builder.CreateBitCast(VtableField, AddressPointPtrTy);
+
+ // Store address point
+ Builder.CreateStore(VtableAddressPoint, VtableField);
+}
+
+llvm::Value *CodeGenFunction::LoadCXXVTT() {
+ assert((isa<CXXConstructorDecl>(CurFuncDecl) ||
+ isa<CXXDestructorDecl>(CurFuncDecl)) &&
+ "Must be in a C++ ctor or dtor to load the vtt parameter");
+
+ return Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");
+}
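The substantive change in this file is the VTT plumbing: base-object constructors and destructors of classes with virtual bases now receive a real VTT (virtual table table) pointer in place of the dummy null parameter the old CGCXX.cpp code passed, and GetVTTParameter selects either a sub-VTT slice of the caller's own VTT or the complete class's VTT fetched by name. An illustrative hierarchy (example source, not from the patch) that exercises this path:

    struct V { int v; V(); };

    // A and B each have a virtual base, so their base-object constructors
    // (Ctor_Base) take a VTT parameter naming the construction vtables to
    // install while the subobject is being built.
    struct A : virtual V { A(); };
    struct B : virtual V { B(); };

    // D's complete-object constructor (Ctor_Complete) constructs V exactly
    // once, then invokes A's and B's base constructors, handing each one a
    // sub-VTT slice of D's VTT (the getSubVTTIndex lookup above).
    struct D : A, B { D(); };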
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 19695c8..ab8f663 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -65,6 +65,25 @@ llvm::DIDescriptor CGDebugInfo::getContext(const VarDecl *Decl,
return CompileUnit;
}
+/// getFunctionName - Get function name for the given FunctionDecl. If the
+/// name is constructed on demand (e.g. C++ destructor) then the name
+/// is stored on the side.
+llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
+ assert (FD && "Invalid FunctionDecl!");
+ IdentifierInfo *FII = FD->getIdentifier();
+ if (FII)
+ return FII->getName();
+
+ // Otherwise, construct a human-readable name for debug info.
+ std::string NS = FD->getNameAsString();
+
+ // Copy this name on the side and use its reference.
+ unsigned Length = NS.length() + 1;
+ char *StrPtr = FunctionNames.Allocate<char>(Length);
+ strncpy(StrPtr, NS.c_str(), Length);
+ return llvm::StringRef(StrPtr);
+}
+
/// getOrCreateCompileUnit - Get the compile unit from the cache or create a new
/// one if necessary. This returns null for invalid source locations.
llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
@@ -972,18 +991,32 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
/// EmitFunctionStart - Constructs the debug code for entering a function -
/// "llvm.dbg.func.start.".
-void CGDebugInfo::EmitFunctionStart(llvm::StringRef Name, QualType FnType,
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::Function *Fn,
CGBuilderTy &Builder) {
- llvm::StringRef LinkageName(Name);
- // Skip the asm prefix if it exists.
- //
- // FIXME: This should probably be the unmangled name?
- if (Name[0] == '\01')
- Name = Name.substr(1);
+ llvm::StringRef Name;
+ llvm::StringRef LinkageName;
+
+ const Decl *D = GD.getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ Name = getFunctionName(FD);
+ if (Name[0] == '\01')
+ Name = Name.substr(1);
+ // Use mangled name as linkage name for c/c++ functions.
+ LinkageName = CGM.getMangledName(GD);
+ } else {
+ // Use llvm function name as linkage name.
+ Name = Fn->getName();
+ // Skip the asm prefix if it exists.
+ if (Name[0] == '\01')
+ Name = Name.substr(1);
+ LinkageName = Name;
+ }
- // FIXME: Why is this using CurLoc???
+ // It is expected that CurLoc is set before using EmitFunctionStart.
+ // Usually, CurLoc points to the left-brace location of the compound
+ // statement representing the function body.
llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
SourceManager &SM = CGM.getContext().getSourceManager();
unsigned LineNo = SM.getPresumedLoc(CurLoc).getLine();
@@ -1379,7 +1412,7 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
else
Unit = llvm::DICompileUnit();
- uint64_t offset = CGF->BlockDecls[Decl];
+ CharUnits offset = CGF->BlockDecls[Decl];
llvm::SmallVector<llvm::Value *, 9> addr;
llvm::LLVMContext &VMContext = CGM.getLLVMContext();
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
@@ -1387,22 +1420,24 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
llvm::DIFactory::OpPlus));
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset));
+ offset.getQuantity()));
if (BDRE->isByRef()) {
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
llvm::DIFactory::OpDeref));
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
llvm::DIFactory::OpPlus));
- offset = CGF->LLVMPointerWidth/8; // offset of __forwarding field
+ // offset of __forwarding field
+ offset = CharUnits::fromQuantity(CGF->LLVMPointerWidth/8);
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset));
+ offset.getQuantity()));
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
llvm::DIFactory::OpDeref));
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
llvm::DIFactory::OpPlus));
- offset = XOffset/8; // offset of x field
+ // offset of x field
+ offset = CharUnits::fromQuantity(XOffset/8);
addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset));
+ offset.getQuantity()));
}
// Create the descriptor for the variable.
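getFunctionName copies names it has to compute (destructors, operators) into a BumpPtrAllocator because DIDescriptor nodes hold StringRefs, which do not own their bytes; a StringRef into a temporary std::string would dangle. A minimal sketch of that stash-and-reference pattern (the helper name is made up; the allocator must outlive every returned StringRef, as FunctionNames does by living in CGDebugInfo):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Allocator.h"
    #include <cstring>
    #include <string>

    // Copy a transient string into arena storage so the returned
    // StringRef stays valid after 'Name' is destroyed.
    llvm::StringRef stashName(llvm::BumpPtrAllocator &Arena,
                              const std::string &Name) {
      char *StrPtr = Arena.Allocate<char>(Name.length());
      std::memcpy(StrPtr, Name.data(), Name.length());
      return llvm::StringRef(StrPtr, Name.length());
    }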
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 7df2a62..8e88988 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
#include <map>
#include "CGBuilder.h"
@@ -35,6 +36,7 @@ namespace clang {
namespace CodeGen {
class CodeGenModule;
class CodeGenFunction;
+ class GlobalDecl;
/// CGDebugInfo - This class gathers all debug information during compilation
/// and is responsible for emitting to llvm globals or pass directly to
@@ -58,6 +60,10 @@ class CGDebugInfo {
std::vector<llvm::TrackingVH<llvm::MDNode> > RegionStack;
+ /// FunctionNames - Storage for function names that are constructed on
+ /// demand, e.g. C++ destructors and C++ operators.
+ llvm::BumpPtrAllocator FunctionNames;
+
/// Helper functions for getOrCreateType.
llvm::DIType CreateType(const BuiltinType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const ComplexType *Ty, llvm::DICompileUnit U);
@@ -93,7 +99,7 @@ public:
/// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate
/// start of a new function.
- void EmitFunctionStart(llvm::StringRef Name, QualType FnType,
+ void EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::Function *Fn, CGBuilderTy &Builder);
/// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
@@ -149,6 +155,11 @@ private:
/// CreateTypeNode - Create type metadata for a source language type.
llvm::DIType CreateTypeNode(QualType Ty, llvm::DICompileUnit Unit);
+
+ /// getFunctionName - Get function name for the given FunctionDecl. If the
+  /// name is constructed on demand (e.g. C++ destructor) then the name
+ /// is stored on the side.
+ llvm::StringRef getFunctionName(const FunctionDecl *FD);
};
} // namespace CodeGen
} // namespace clang
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 602cc9e..9606a71 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -473,7 +473,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
llvm::IntegerType::get(VMContext, LLVMPointerWidth);
llvm::Value *SizeVal =
llvm::ConstantInt::get(IntPtr,
- getContext().getTypeSizeInChars(Ty).getRaw());
+ getContext().getTypeSizeInChars(Ty).getQuantity());
const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
if (Loc->getType() != BP)
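
The getRaw()/getQuantity() and uint64_t-to-CharUnits changes throughout this patch all follow one pattern: byte sizes travel as a dedicated type and only decay to a raw integer at the LLVM boundary. A minimal sketch of the idea (a simplified stand-in, not clang's actual CharUnits definition):

    #include <cstdint>

    // An opaque byte count that cannot be accidentally mixed up with bit
    // sizes or element counts; conversion to a raw integer is explicit.
    class ByteUnits {
      int64_t Quantity;
      explicit ByteUnits(int64_t Q) : Quantity(Q) {}
    public:
      static ByteUnits fromQuantity(int64_t Q) { return ByteUnits(Q); }
      static ByteUnits One() { return ByteUnits(1); }
      int64_t getQuantity() const { return Quantity; } // explicit escape hatch
      bool isOne() const { return Quantity == 1; }
    };
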
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 0b6ea5a..47773a0 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -120,6 +120,22 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
}
void
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
+ const llvm::FunctionType *FTy
+ = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
+
+ // Create a variable initialization function.
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ "__cxx_global_var_init", &TheModule);
+
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D);
+
+ CXXGlobalInits.push_back(Fn);
+}
+
+void
CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty())
return;
@@ -140,18 +156,26 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
AddGlobalCtor(Fn);
}
+void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+ SourceLocation());
+
+ llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
+ EmitCXXGlobalVarDeclInit(*D, DeclPtr);
+
+ FinishFunction();
+}
+
void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- const VarDecl **Decls,
+ llvm::Constant **Decls,
unsigned NumDecls) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
SourceLocation());
- for (unsigned i = 0; i != NumDecls; ++i) {
- const VarDecl *D = Decls[i];
+ for (unsigned i = 0; i != NumDecls; ++i)
+ Builder.CreateCall(Decls[i]);
- llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
- EmitCXXGlobalVarDeclInit(*D, DeclPtr);
- }
FinishFunction();
}
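
The restructuring above replaces one big initializer that walked every global with one internal __cxx_global_var_init function per variable, plus an aggregate function that simply calls each of them. Roughly, for a single global (a sketch in C++; the function names and the placement-new are illustrative of the emitted IR, not literal output):

    #include <new>

    struct Foo { Foo(); };
    Foo foo; // needs dynamic initialization

    static void cxx_global_var_init() {   // one such function per variable
      new (&foo) Foo();                   // run the constructor in place
    }
    static void cxx_global_init() {       // the aggregate initializer:
      cxx_global_var_init();              // one call per per-variable init
    }                                     // function; run as a static ctor
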
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index ab451cf..2358bb3 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -240,6 +240,132 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
EmitBlock(Cont);
}
+
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ QualType ValTy = E->getSubExpr()->getType();
+ llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal();
+
+ int AmountVal = isInc ? 1 : -1;
+
+ if (ValTy->isPointerType() &&
+ ValTy->getAs<PointerType>()->isVariableArrayType()) {
+ // The amount of the addition/subtraction needs to account for the VLA size
+ ErrorUnsupported(E, "VLA pointer inc/dec");
+ }
+
+ llvm::Value *NextVal;
+ if (const llvm::PointerType *PT =
+ dyn_cast<llvm::PointerType>(InVal->getType())) {
+ llvm::Constant *Inc =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
+ if (!isa<llvm::FunctionType>(PT->getElementType())) {
+ QualType PTEE = ValTy->getPointeeType();
+ if (const ObjCInterfaceType *OIT =
+ dyn_cast<ObjCInterfaceType>(PTEE)) {
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ int size = getContext().getTypeSize(OIT) / 8;
+ if (!isInc)
+ size = -size;
+ Inc = llvm::ConstantInt::get(Inc->getType(), size);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ InVal = Builder.CreateBitCast(InVal, i8Ty);
+ NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
+ llvm::Value *lhs = LV.getAddress();
+ lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
+ LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy));
+ } else
+ NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
+ } else {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
+ NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
+ NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ }
+ } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
+    // Bool++ is an interesting case; due to promotion rules, we get:
+    //   Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
+    //   Bool = ((int)Bool+1) != 0
+    // An interesting aspect of this is that the increment always yields
+    // true; the decrement does not have this property.
+ NextVal = llvm::ConstantInt::getTrue(VMContext);
+ } else if (isa<llvm::IntegerType>(InVal->getType())) {
+ NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
+
+ // Signed integer overflow is undefined behavior.
+ if (ValTy->isSignedIntegerType())
+ NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ else
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ } else {
+    // Floating point: add or subtract 1, converted to the type's own
+    // floating-point semantics.
+ if (InVal->getType()->isFloatTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(AmountVal)));
+ else if (InVal->getType()->isDoubleTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(AmountVal)));
+ else {
+ llvm::APFloat F(static_cast<float>(AmountVal));
+ bool ignored;
+ F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ NextVal = llvm::ConstantFP::get(VMContext, F);
+ }
+ NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitfield())
+ EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
+ else
+ EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? NextVal : InVal;
+}
+
+
+CodeGenFunction::ComplexPairTy CodeGenFunction::
+EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+
+ llvm::Value *NextVal;
+ if (isa<llvm::IntegerType>(InVal.first->getType())) {
+ uint64_t AmountVal = isInc ? 1 : -1;
+ NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ } else {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
+ if (!isInc)
+ FVal.changeSign();
+ NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ }
+
+ ComplexPairTy IncVal(NextVal, InVal.second);
+
+ // Store the updated result through the lvalue.
+ StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? IncVal : InVal;
+}
+
+
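+
The Bool++ shortcut in EmitScalarPrePostIncDec above can be checked against the language rules directly (C++98/C99, where ++ on a bool/_Bool operand is still legal):

    #include <cassert>

    int main() {
      bool b = true;
      b++;          // promoted: b = ((int)b + 1) != 0, i.e. 2 != 0
      assert(b);    // true
      bool c = false;
      c++;          // 0 + 1 != 0
      assert(c);    // also true: increment always yields true, so the
      return 0;     // emitter can just store a constant 'true'
    }
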
//===----------------------------------------------------------------------===//
// LValue Expression Emission
//===----------------------------------------------------------------------===//
@@ -994,8 +1120,16 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
MakeQualifiers(ExprTy));
}
case UnaryOperator::PreInc:
- case UnaryOperator::PreDec:
- return EmitUnsupportedLValue(E, "pre-inc/dec expression");
+ case UnaryOperator::PreDec: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ bool isInc = E->getOpcode() == UnaryOperator::PreInc;
+
+ if (E->getType()->isAnyComplexType())
+ EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ else
+ EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ return LV;
+ }
}
}
@@ -1139,16 +1273,16 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
QualType BaseType = getContext().getBaseElementType(VAT);
- uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
+ CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
Idx = Builder.CreateUDiv(Idx,
llvm::ConstantInt::get(Idx->getType(),
- BaseTypeSize));
+ BaseTypeSize.getQuantity()));
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
} else if (const ObjCInterfaceType *OIT =
dyn_cast<ObjCInterfaceType>(E->getType())) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
- getContext().getTypeSize(OIT) / 8);
+ getContext().getTypeSizeInChars(OIT).getQuantity());
Idx = Builder.CreateMul(Idx, InterfaceSize);
@@ -1211,8 +1345,8 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
Base = EmitLValue(E->getBase());
} else {
// Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
- const VectorType *VT = E->getBase()->getType()->getAs<VectorType>();
- assert(VT && "Result must be a vector");
+ assert(E->getBase()->getType()->getAs<VectorType>() &&
+ "Result must be a vector");
llvm::Value *Vec = EmitScalarExpr(E->getBase());
// Store the vector to memory (because LValue wants an address).
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index b95fd79..c852d65 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -313,7 +313,8 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
"Unexpected member pointer type!");
const DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(DRE->getDecl());
+ const CXXMethodDecl *MD =
+ cast<CXXMethodDecl>(DRE->getDecl())->getCanonicalDecl();
const llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 7992322..e264109 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -15,6 +15,334 @@
using namespace clang;
using namespace CodeGen;
+RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This),
+ MD->getThisType(getContext())));
+
+ // If there is a VTT parameter, emit it.
+ if (VTT) {
+ QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+ Args.push_back(std::make_pair(RValue::get(VTT), T));
+ }
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+
+ QualType ResultType = MD->getType()->getAs<FunctionType>()->getResultType();
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), Callee,
+ ReturnValue, Args, MD);
+}
+
+/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
+/// expr can be devirtualized.
+static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      // A variable of record type: its dynamic type is known exactly.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXTemporaryObjectExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
+
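+
The cases canDevirtualizeMemberFunctionCalls recognizes, spelled out in source form (a hypothetical example; the comments name the branch that fires):

    struct Base    { virtual int f() { return 0; } };
    struct Derived : Base { virtual int f() { return 1; } };

    int calls(Base &ref, Derived local) {
      int a = ref.f();        // reference: dynamic type unknown, stays virtual
      int b = local.f();      // DeclRefExpr to a variable of record type:
                              //   exact type known, emitted as a direct call
      int c = Derived().f();  // temporary object expression: also direct
      return a + b + c;
    }
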
+RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
+ ReturnValueSlot ReturnValue) {
+ if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
+ return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
+
+ const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+ if (MD->isStatic()) {
+ // The method is static, emit it as we would a regular call.
+ llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), Callee,
+ ReturnValue, CE->arg_begin(), CE->arg_end());
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Value *This;
+
+ if (ME->isArrow())
+ This = EmitScalarExpr(ME->getBase());
+ else {
+ LValue BaseLV = EmitLValue(ME->getBase());
+ This = BaseLV.getAddress();
+ }
+
+ if (MD->isCopyAssignment() && MD->isTrivial()) {
+    // We don't want to generate the trivial copy assignment operator when
+ // it isn't necessary; just produce the proper effect here.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
+ llvm::Value *Callee;
+ if (const CXXDestructorDecl *Destructor
+ = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (Destructor->isTrivial())
+ return RValue::get(0);
+ if (MD->isVirtual() && !ME->hasQualifier() &&
+ !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
+ Callee = BuildVirtualCall(Destructor, Dtor_Complete, This, Ty);
+ } else {
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty);
+ }
+ } else if (MD->isVirtual() && !ME->hasQualifier() &&
+ !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
+ Callee = BuildVirtualCall(MD, This, Ty);
+ } else {
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+ }
+
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ CE->arg_begin(), CE->arg_end());
+}
+
+RValue
+CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ const BinaryOperator *BO =
+ cast<BinaryOperator>(E->getCallee()->IgnoreParens());
+ const Expr *BaseExpr = BO->getLHS();
+ const Expr *MemFnExpr = BO->getRHS();
+
+ const MemberPointerType *MPT =
+ MemFnExpr->getType()->getAs<MemberPointerType>();
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr =
+ CreateTempAlloca(ConvertType(MemFnExpr->getType()), "mem.fn");
+ EmitAggExpr(MemFnExpr, MemFnPtr, /*VolatileDest=*/false);
+
+ // Emit the 'this' pointer.
+ llvm::Value *This;
+
+ if (BO->getOpcode() == BinaryOperator::PtrMemI)
+ This = EmitScalarExpr(BaseExpr);
+ else
+ This = EmitLValue(BaseExpr).getAddress();
+
+ // Adjust it.
+ llvm::Value *Adj = Builder.CreateStructGEP(MemFnPtr, 1);
+ Adj = Builder.CreateLoad(Adj, "mem.fn.adj");
+
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Int8PtrTy, "ptr");
+ Ptr = Builder.CreateGEP(Ptr, Adj, "adj");
+
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this");
+
+ llvm::Value *FnPtr = Builder.CreateStructGEP(MemFnPtr, 0, "mem.fn.ptr");
+
+ const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+
+ llvm::Value *FnAsInt = Builder.CreateLoad(FnPtr, "fn");
+
+ // If the LSB in the function pointer is 1, the function pointer points to
+ // a virtual function.
+ llvm::Value *IsVirtual
+ = Builder.CreateAnd(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1),
+ "and");
+
+ IsVirtual = Builder.CreateTrunc(IsVirtual,
+ llvm::Type::getInt1Ty(VMContext));
+
+ llvm::BasicBlock *FnVirtual = createBasicBlock("fn.virtual");
+ llvm::BasicBlock *FnNonVirtual = createBasicBlock("fn.nonvirtual");
+ llvm::BasicBlock *FnEnd = createBasicBlock("fn.end");
+
+ Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
+ EmitBlock(FnVirtual);
+
+ const llvm::Type *VTableTy =
+ FTy->getPointerTo()->getPointerTo()->getPointerTo();
+
+ llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy);
+ VTable = Builder.CreateLoad(VTable);
+
+ VTable = Builder.CreateGEP(VTable, FnAsInt, "fn");
+
+ // Since the function pointer is 1 plus the virtual table offset, we
+ // subtract 1 by using a GEP.
+ VTable = Builder.CreateConstGEP1_64(VTable, (uint64_t)-1);
+
+ llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "virtualfn");
+
+ EmitBranch(FnEnd);
+ EmitBlock(FnNonVirtual);
+
+ // If the function is not virtual, just load the pointer.
+ llvm::Value *NonVirtualFn = Builder.CreateLoad(FnPtr, "fn");
+ NonVirtualFn = Builder.CreateIntToPtr(NonVirtualFn, FTy->getPointerTo());
+
+ EmitBlock(FnEnd);
+
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
+ Callee->reserveOperandSpace(2);
+ Callee->addIncoming(VirtualFn, FnVirtual);
+ Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+
+ CallArgList Args;
+
+ QualType ThisType =
+ getContext().getPointerType(getContext().getTagDeclType(RD));
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This), ThisType));
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
+ QualType ResultType = BO->getType()->getAs<FunctionType>()->getResultType();
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), Callee,
+ ReturnValue, Args);
+}
+
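+
The branch structure above decodes the Itanium C++ ABI representation of a pointer to member function. Written out as a plain struct (an illustrative model, not a type the compiler declares):

    #include <cstddef>

    // Itanium ABI: a member function pointer is a (ptr, adj) pair.
    struct MemberFnPtrRep {
      std::ptrdiff_t ptr; // non-virtual: the function's address;
                          // virtual:     1 + offset of the vtable slot
      std::ptrdiff_t adj; // byte adjustment added to 'this' before the call
    };
    // The emitted code tests (ptr & 1): if set, load the vtable through the
    // adjusted 'this' and index it with ptr - 1; otherwise call ptr directly.
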
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ if (MD->isCopyAssignment()) {
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
+ if (ClassDecl->hasTrivialCopyAssignment()) {
+ assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
+ "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
+ llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+ llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
+ QualType Ty = E->getType();
+ EmitAggregateCopy(This, Src, Ty);
+ return RValue::get(This);
+ }
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+
+ llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+
+ llvm::Value *Callee;
+ if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
+ Callee = BuildVirtualCall(MD, This, Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ E->arg_begin() + 1, E->arg_end());
+}
+
+void
+CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
+ const CXXConstructExpr *E) {
+ assert(Dest && "Must have a destination!");
+ const CXXConstructorDecl *CD = E->getConstructor();
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(E->getType());
+  // For a copy constructor, even a trivial one, we must fall through so
+  // that its argument is code-gen'ed.
+ if (!CD->isCopyConstructor()) {
+ QualType InitType = E->getType();
+ if (Array)
+ InitType = getContext().getBaseElementType(Array);
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(InitType->getAs<RecordType>()->getDecl());
+ if (RD->hasTrivialConstructor())
+ return;
+ }
+  // Codegen optimization: eliminate the copy constructor and emit its
+  // first argument directly instead.
+ if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
+ const Expr *Arg = E->getArg(0);
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ assert((ICE->getCastKind() == CastExpr::CK_NoOp ||
+ ICE->getCastKind() == CastExpr::CK_ConstructorConversion ||
+ ICE->getCastKind() == CastExpr::CK_UserDefinedConversion) &&
+ "Unknown implicit cast kind in constructor elision");
+ Arg = ICE->getSubExpr();
+ }
+
+ if (const CXXFunctionalCastExpr *FCE = dyn_cast<CXXFunctionalCastExpr>(Arg))
+ Arg = FCE->getSubExpr();
+
+ if (const CXXBindTemporaryExpr *BindExpr =
+ dyn_cast<CXXBindTemporaryExpr>(Arg))
+ Arg = BindExpr->getSubExpr();
+
+ EmitAggExpr(Arg, Dest, false);
+ return;
+ }
+ if (Array) {
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(Dest, BasePtr);
+
+ EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
+ E->arg_begin(), E->arg_end());
+ }
+ else
+ // Call the constructor.
+ EmitCXXConstructorCall(CD, Ctor_Complete, Dest,
+ E->arg_begin(), E->arg_end());
+}
+
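+
A concrete instance of the elision path above (hypothetical types; assumes -felide-constructors, which is the default):

    struct S { S(); S(const S &); };
    S make();

    void use() {
      S s = make();  // elidable copy: CodeGen strips the implicit-cast and
                     // bind-temporary nodes around make()'s result and emits
                     // it directly into s's storage, skipping S(const S&)
    }
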
static uint64_t CalculateCookiePadding(ASTContext &Ctx, QualType ElementType) {
const RecordType *RT = ElementType->getAs<RecordType>();
if (!RT)
@@ -405,7 +733,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
/*isVariadic=*/false);
llvm::Value *Callee = BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
- EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, 0, 0);
+ EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
+ 0, 0);
// The dtor took care of deleting the object.
ShouldCallDelete = false;
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index be2239f..5ec336c 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -145,7 +145,10 @@ public:
// Operators.
ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
- bool isInc, bool isPre);
+ bool isInc, bool isPre) {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
+ }
ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
return VisitPrePostIncDec(E, false, false);
}
@@ -355,40 +358,6 @@ ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
}
-ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
- bool isInc, bool isPre) {
- LValue LV = CGF.EmitLValue(E->getSubExpr());
- ComplexPairTy InVal = EmitLoadOfComplex(LV.getAddress(),
- LV.isVolatileQualified());
-
- llvm::Value *NextVal;
- if (isa<llvm::IntegerType>(InVal.first->getType())) {
- uint64_t AmountVal = isInc ? 1 : -1;
- NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
-
- // Add the inc/dec to the real part.
- NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
- } else {
- QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
- llvm::APFloat FVal(CGF.getContext().getFloatTypeSemantics(ElemTy), 1);
- if (!isInc)
- FVal.changeSign();
- NextVal = llvm::ConstantFP::get(CGF.getLLVMContext(), FVal);
-
- // Add the inc/dec to the real part.
- NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
- }
-
- ComplexPairTy IncVal(NextVal, InVal.second);
-
- // Store the updated result through the lvalue.
- EmitStoreOfComplex(IncVal, LV.getAddress(), LV.isVolatileQualified());
-
- // If this is a postinc, return the value read from memory, otherwise use the
- // updated value.
- return isPre ? IncVal : InVal;
-}
-
ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index d428983..dec06e2 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -408,6 +408,8 @@ public:
llvm::Constant *EmitMemberFunctionPointer(CXXMethodDecl *MD) {
assert(MD->isInstance() && "Member function must not be static!");
+ MD = MD->getCanonicalDecl();
+
const llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
@@ -633,32 +635,6 @@ public:
return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
}
- llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
- const llvm::VectorType *VType =
- cast<llvm::VectorType>(ConvertType(ILE->getType()));
- const llvm::Type *ElemTy = VType->getElementType();
- std::vector<llvm::Constant*> Elts;
- unsigned NumElements = VType->getNumElements();
- unsigned NumInitElements = ILE->getNumInits();
-
- unsigned NumInitableElts = std::min(NumInitElements, NumElements);
-
- // Copy initializer elements.
- unsigned i = 0;
- for (; i < NumInitableElts; ++i) {
- Expr *Init = ILE->getInit(i);
- llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
- if (!C)
- return 0;
- Elts.push_back(C);
- }
-
- for (; i < NumElements; ++i)
- Elts.push_back(llvm::Constant::getNullValue(ElemTy));
-
- return llvm::ConstantVector::get(VType, Elts);
- }
-
llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
return CGM.EmitNullConstant(E->getType());
}
@@ -682,8 +658,9 @@ public:
if (ILE->getType()->isUnionType())
return EmitUnionInitialization(ILE);
+ // If ILE was a constant vector, we would have handled it already.
if (ILE->getType()->isVectorType())
- return EmitVectorInitialization(ILE);
+ return 0;
assert(0 && "Unable to handle InitListExpr");
// Get rid of control reaches end of void function warning.
@@ -833,7 +810,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
llvm::Constant *Offset =
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- Result.Val.getLValueOffset());
+ Result.Val.getLValueOffset().getQuantity());
llvm::Constant *C;
if (const Expr *LVBase = Result.Val.getLValueBase()) {
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 93646d6..690a7dc 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -181,12 +181,6 @@ public:
Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return EmitLoadOfLValue(E);
}
- Value *VisitStringLiteral(Expr *E) { return EmitLValue(E).getAddress(); }
- Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
- return EmitLValue(E).getAddress();
- }
-
- Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); }
Value *VisitInitListExpr(InitListExpr *E);
@@ -214,7 +208,10 @@ public:
Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
// Unary Operators.
- Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
+ Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return CGF.EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+ }
Value *VisitUnaryPostDec(const UnaryOperator *E) {
return VisitPrePostIncDec(E, false, false);
}
@@ -1009,98 +1006,6 @@ Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
// Unary Operators
//===----------------------------------------------------------------------===//
-Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
- bool isInc, bool isPre) {
- LValue LV = EmitLValue(E->getSubExpr());
- QualType ValTy = E->getSubExpr()->getType();
- Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal();
-
- llvm::LLVMContext &VMContext = CGF.getLLVMContext();
-
- int AmountVal = isInc ? 1 : -1;
-
- if (ValTy->isPointerType() &&
- ValTy->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition/subtraction needs to account for the VLA size
- CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
- }
-
- Value *NextVal;
- if (const llvm::PointerType *PT =
- dyn_cast<llvm::PointerType>(InVal->getType())) {
- llvm::Constant *Inc =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
- if (!isa<llvm::FunctionType>(PT->getElementType())) {
- QualType PTEE = ValTy->getPointeeType();
- if (const ObjCInterfaceType *OIT =
- dyn_cast<ObjCInterfaceType>(PTEE)) {
- // Handle interface types, which are not represented with a concrete type.
- int size = CGF.getContext().getTypeSize(OIT) / 8;
- if (!isInc)
- size = -size;
- Inc = llvm::ConstantInt::get(Inc->getType(), size);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- InVal = Builder.CreateBitCast(InVal, i8Ty);
- NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
- llvm::Value *lhs = LV.getAddress();
- lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
- LV = LValue::MakeAddr(lhs, CGF.MakeQualifiers(ValTy));
- } else
- NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
- } else {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
- NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
- NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
- }
- } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
- // Bool++ is an interesting case, due to promotion rules, we get:
- // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
- // Bool = ((int)Bool+1) != 0
- // An interesting aspect of this is that increment is always true.
- // Decrement does not have this property.
- NextVal = llvm::ConstantInt::getTrue(VMContext);
- } else if (isa<llvm::IntegerType>(InVal->getType())) {
- NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
-
- // Signed integer overflow is undefined behavior.
- if (ValTy->isSignedIntegerType())
- NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
- else
- NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
- } else {
- // Add the inc/dec to the real part.
- if (InVal->getType()->isFloatTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<float>(AmountVal)));
- else if (InVal->getType()->isDoubleTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<double>(AmountVal)));
- else {
- llvm::APFloat F(static_cast<float>(AmountVal));
- bool ignored;
- F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
- &ignored);
- NextVal = llvm::ConstantFP::get(VMContext, F);
- }
- NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
- }
-
- // Store the updated result through the lvalue.
- if (LV.isBitfield())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy,
- &NextVal);
- else
- CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
-
- // If this is a postinc, return the value read from memory, otherwise use the
- // updated value.
- return isPre ? NextVal : InVal;
-}
-
-
Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreResultAssign();
Value *Op = Visit(E->getSubExpr());
@@ -1405,7 +1310,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
- CGF.getContext().getTypeSize(OIT) / 8);
+ CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
Idx = Builder.CreateMul(Idx, InterfaceSize);
const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
@@ -1469,7 +1374,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
dyn_cast<ObjCInterfaceType>(LHSElementType)) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
- CGF.getContext().getTypeSize(OIT) / 8);
+ CGF.getContext().
+ getTypeSizeInChars(OIT).getQuantity());
Idx = Builder.CreateMul(Idx, InterfaceSize);
const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
@@ -1493,14 +1399,14 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
Value *LHS = Ops.LHS;
Value *RHS = Ops.RHS;
- uint64_t ElementSize;
+ CharUnits ElementSize;
// Handle GCC extension for pointer arithmetic on void* and function pointer
// types.
if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
- ElementSize = 1;
+ ElementSize = CharUnits::One();
} else {
- ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
+ ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
}
const llvm::Type *ResultType = ConvertType(Ops.Ty);
@@ -1509,13 +1415,14 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
// Optimize out the shift for element size of 1.
- if (ElementSize == 1)
+ if (ElementSize.isOne())
return BytesBetween;
// Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
// pointer difference in C is only defined in the case where both operands
// are pointing to elements of an array.
- Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
+ Value *BytesPerElt =
+ llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
}
}
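
The exact-division sequence above mirrors how C defines pointer subtraction. For example (assuming a 4-byte int and 64-bit pointers; the IR in the comments is the expected shape, not captured output):

    #include <cstddef>

    std::ptrdiff_t dist(int *p, int *q) {
      return p - q; // emitted roughly as:
                    //   %d = sub i64 %p.int, %q.int   ; raw byte distance
                    //   sdiv exact i64 %d, 4          ; divide by sizeof(int)
                    // 'exact' is sound because p - q is only defined when
                    // both point into the same array, so the byte distance
                    // is always a multiple of the element size
    }
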
@@ -1971,47 +1878,6 @@ Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
DstTy);
}
-Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
- assert(V1->getType() == V2->getType() &&
- "Vector operands must be of the same type");
- unsigned NumElements =
- cast<llvm::VectorType>(V1->getType())->getNumElements();
-
- va_list va;
- va_start(va, V2);
-
- llvm::SmallVector<llvm::Constant*, 16> Args;
- for (unsigned i = 0; i < NumElements; i++) {
- int n = va_arg(va, int);
- assert(n >= 0 && n < (int)NumElements * 2 &&
- "Vector shuffle index out of bounds!");
- Args.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), n));
- }
-
- const char *Name = va_arg(va, const char *);
- va_end(va);
-
- llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
-
- return Builder.CreateShuffleVector(V1, V2, Mask, Name);
-}
-
-llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
- unsigned NumVals, bool isSplat) {
- llvm::Value *Vec
- = llvm::UndefValue::get(llvm::VectorType::get(Vals[0]->getType(), NumVals));
-
- for (unsigned i = 0, e = NumVals; i != e; ++i) {
- llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
- llvm::Value *Idx = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), i);
- Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
- }
-
- return Vec;
-}
-
LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
llvm::Value *V;
// object->isa or (*object).isa
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 95f67ae..e7a2093 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -114,9 +114,11 @@ private:
llvm::Constant *ExportUniqueString(const std::string &Str, const std::string
prefix);
llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
- std::vector<llvm::Constant*> &V, const std::string &Name="");
+ std::vector<llvm::Constant*> &V, const std::string &Name="",
+ llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
- std::vector<llvm::Constant*> &V, const std::string &Name="");
+ std::vector<llvm::Constant*> &V, const std::string &Name="",
+ llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
const ObjCIvarDecl *Ivar);
void EmitClassRef(const std::string &className);
@@ -215,8 +217,11 @@ static std::string SymbolNameForClass(const std::string &ClassName) {
static std::string SymbolNameForMethod(const std::string &ClassName, const
std::string &CategoryName, const std::string &MethodName, bool isClassMethod)
{
- return "_OBJC_METHOD_" + ClassName + "("+CategoryName+")"+
- (isClassMethod ? "+" : "-") + MethodName;
+ std::string MethodNameColonStripped = MethodName;
+ std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
+ ':', '_');
+ return std::string(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ CategoryName + "_" + MethodNameColonStripped;
}
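
The effect of the renaming on the emitted symbols, for a hypothetical instance method -[NSString(MyCategory) countOf:]:

    // Old scheme: _OBJC_METHOD_NSString(MyCategory)-countOf:
    // New scheme: _i_NSString_MyCategory_countOf_
    //
    // Parentheses, the +/- marker, and colons are gone, so the result is a
    // valid assembler identifier on every target; "_c_" prefixes class
    // methods and "_i_" prefixes instance methods.
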
CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
@@ -257,6 +262,10 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *OID) {
llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString());
+ // With the incompatible ABI, this will need to be replaced with a direct
+ // reference to the class symbol. For the compatible nonfragile ABI we are
+ // still performing this lookup at run time but emitting the symbol for the
+ // class externally so that we can make the switch later.
EmitClassRef(OID->getNameAsString());
ClassName = Builder.CreateStructGEP(ClassName, 0);
@@ -323,14 +332,16 @@ llvm::Constant *CGObjCGNU::ExportUniqueString(const std::string &Str,
}
llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
- std::vector<llvm::Constant*> &V, const std::string &Name) {
+ std::vector<llvm::Constant*> &V, const std::string &Name,
+ llvm::GlobalValue::LinkageTypes linkage) {
llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
-  return new llvm::GlobalVariable(TheModule, Ty, false,
-                                  llvm::GlobalValue::InternalLinkage, C, Name);
+  return new llvm::GlobalVariable(TheModule, Ty, false, linkage, C, Name);
}
llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
- std::vector<llvm::Constant*> &V, const std::string &Name) {
+ std::vector<llvm::Constant*> &V, const std::string &Name,
+ llvm::GlobalValue::LinkageTypes linkage) {
llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
-  return new llvm::GlobalVariable(TheModule, Ty, false,
-                                  llvm::GlobalValue::InternalLinkage, C, Name);
+  return new llvm::GlobalVariable(TheModule, Ty, false, linkage, C, Name);
@@ -703,7 +714,10 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.push_back(IvarOffsets);
Elements.push_back(Properties);
// Create an instance of the structure
- return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name));
+ // This is now an externally visible symbol, so that we can speed up class
+ // messages in the next ABI.
+ return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name),
+ llvm::GlobalValue::ExternalLinkage);
}
llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
@@ -1607,7 +1621,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
Params.push_back(PtrTy);
llvm::Value *RethrowFn =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, false), "objc_exception_throw");
+ Params, false), "_Unwind_Resume");
bool isTry = isa<ObjCAtTryStmt>(S);
llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
@@ -1923,7 +1937,7 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
if (!IvarOffsetPointer) {
uint64_t Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
llvm::ConstantInt *OffsetGuess =
- llvm::ConstantInt::get(LongTy, Offset, "ivar");
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset);
// Don't emit the guess in non-PIC code because the linker will not be able
// to replace it with the real version for a library. In non-PIC code you
// must compile with the fragile ABI if you want to use ivars from a
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index db6c507..29552ce 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -360,28 +360,12 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
// If we're in an anonymous namespace, then we always want internal linkage.
if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
return llvm::GlobalVariable::InternalLinkage;
-
+
+ // If this class does not have a vtable, we want weak linkage.
if (!RD->isDynamicClass())
return llvm::GlobalValue::WeakODRLinkage;
- // Get the key function.
- const CXXMethodDecl *KeyFunction = RD->getASTContext().getKeyFunction(RD);
- if (!KeyFunction) {
- // There is no key function, the RTTI descriptor is emitted with weak_odr
- // linkage.
- return llvm::GlobalValue::WeakODRLinkage;
- }
-
- // If the key function is defined, but inlined, then the RTTI descriptor is
- // emitted with weak_odr linkage.
- const FunctionDecl* KeyFunctionDefinition;
- KeyFunction->getBody(KeyFunctionDefinition);
-
- if (KeyFunctionDefinition->isInlined())
- return llvm::GlobalValue::WeakODRLinkage;
-
- // Otherwise, the RTTI descriptor is emitted with external linkage.
- return llvm::GlobalValue::ExternalLinkage;
+ return CodeGenModule::getVtableLinkage(RD);
}
case Type::Vector:
diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp
index 7930f71..ae900d6 100644
--- a/lib/CodeGen/CGVtable.cpp
+++ b/lib/CodeGen/CGVtable.cpp
@@ -160,17 +160,19 @@ private:
// vtable for use in computing the initializers for the VTT.
llvm::DenseMap<CtorVtable_t, int64_t> &subAddressPoints;
+ /// AddressPoints - Address points for this vtable.
+ CGVtableInfo::AddressPointsMapTy& AddressPoints;
+
typedef CXXRecordDecl::method_iterator method_iter;
- const bool Extern;
const uint32_t LLVMPointerWidth;
Index_t extra;
typedef std::vector<std::pair<const CXXRecordDecl *, int64_t> > Path_t;
static llvm::DenseMap<CtorVtable_t, int64_t>&
AllocAddressPoint(CodeGenModule &cgm, const CXXRecordDecl *l,
const CXXRecordDecl *c) {
- CodeGenModule::AddrMap_t *&oref = cgm.AddressPoints[l];
+ CGVtableInfo::AddrMap_t *&oref = cgm.getVtableInfo().AddressPoints[l];
if (oref == 0)
- oref = new CodeGenModule::AddrMap_t;
+ oref = new CGVtableInfo::AddrMap_t;
llvm::DenseMap<CtorVtable_t, int64_t> *&ref = (*oref)[c];
if (ref == 0)
@@ -193,14 +195,15 @@ private:
public:
VtableBuilder(const CXXRecordDecl *MostDerivedClass,
const CXXRecordDecl *l, uint64_t lo, CodeGenModule &cgm,
- bool build)
+ bool build, CGVtableInfo::AddressPointsMapTy& AddressPoints)
: BuildVtable(build), MostDerivedClass(MostDerivedClass), LayoutClass(l),
LayoutOffset(lo), BLayout(cgm.getContext().getASTRecordLayout(l)),
rtti(0), VMContext(cgm.getModule().getContext()),CGM(cgm),
PureVirtualFn(0),
subAddressPoints(AllocAddressPoint(cgm, l, MostDerivedClass)),
- Extern(!l->isInAnonymousNamespace()),
- LLVMPointerWidth(cgm.getContext().Target.getPointerWidth(0)) {
+ AddressPoints(AddressPoints),
+ LLVMPointerWidth(cgm.getContext().Target.getPointerWidth(0))
+ {
Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
if (BuildVtable) {
QualType ClassType = CGM.getContext().getTagDeclType(MostDerivedClass);
@@ -213,7 +216,7 @@ public:
return VtableComponents;
}
- llvm::DenseMap<const CXXRecordDecl *, Index_t> &getVBIndex()
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getVBIndex()
{ return VBIndex; }
SavedAdjustmentsVectorTy &getSavedAdjustments()
@@ -463,6 +466,7 @@ public:
RD->getNameAsCString(), Class->getNameAsCString(),
LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint));
subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint;
+ AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint;
// Now also add the address point for all our primary bases.
while (1) {
@@ -479,6 +483,7 @@ public:
RD->getNameAsCString(), Class->getNameAsCString(),
LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint));
subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint;
+ AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint;
}
}
@@ -827,7 +832,6 @@ bool VtableBuilder::OverrideMethod(GlobalDecl GD, bool MorallyVirtual,
MD->getNameAsString().c_str(), (int)-idx-3,
(int)VCalls[idx-1], Class->getNameAsCString()));
}
- VCall[GD] = idx;
int64_t NonVirtualAdjustment = NonVirtualOffset[GD];
int64_t VirtualAdjustment =
-((idx + extra + 2) * LLVMPointerWidth / 8);
@@ -844,6 +848,7 @@ bool VtableBuilder::OverrideMethod(GlobalDecl GD, bool MorallyVirtual,
SavedAdjustments.push_back(
std::make_pair(GD, std::make_pair(OGD, ThisAdjustment)));
}
+ VCall[GD] = idx;
return true;
}
@@ -1090,7 +1095,8 @@ CGVtableInfo::getAdjustments(GlobalDecl GD) {
if (!SavedAdjustmentRecords.insert(RD).second)
return 0;
- VtableBuilder b(RD, RD, 0, CGM, false);
+ AddressPointsMapTy AddressPoints;
+ VtableBuilder b(RD, RD, 0, CGM, false, AddressPoints);
D1(printf("vtable %s\n", RD->getNameAsCString()));
b.GenerateVtableForBase(RD);
b.GenerateVtableForVBases(RD);
@@ -1118,7 +1124,8 @@ int64_t CGVtableInfo::getVirtualBaseOffsetIndex(const CXXRecordDecl *RD,
// FIXME: This seems expensive. Can we do a partial job to get
// just this data.
- VtableBuilder b(RD, RD, 0, CGM, false);
+ AddressPointsMapTy AddressPoints;
+ VtableBuilder b(RD, RD, 0, CGM, false, AddressPoints);
D1(printf("vtable %s\n", RD->getNameAsCString()));
b.GenerateVtableForBase(RD);
b.GenerateVtableForVBases(RD);
@@ -1139,7 +1146,7 @@ int64_t CGVtableInfo::getVirtualBaseOffsetIndex(const CXXRecordDecl *RD,
uint64_t CGVtableInfo::getVtableAddressPoint(const CXXRecordDecl *RD) {
uint64_t AddressPoint =
- (*(*(CGM.AddressPoints[RD]))[RD])[std::make_pair(RD, 0)];
+ (*(*(CGM.getVtableInfo().AddressPoints[RD]))[RD])[std::make_pair(RD, 0)];
return AddressPoint;
}
@@ -1148,7 +1155,8 @@ llvm::GlobalVariable *
CGVtableInfo::GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
bool GenerateDefinition,
const CXXRecordDecl *LayoutClass,
- const CXXRecordDecl *RD, uint64_t Offset) {
+ const CXXRecordDecl *RD, uint64_t Offset,
+ AddressPointsMapTy& AddressPoints) {
llvm::SmallString<256> OutName;
if (LayoutClass != RD)
CGM.getMangleContext().mangleCXXCtorVtable(LayoutClass, Offset / 8,
@@ -1158,8 +1166,10 @@ CGVtableInfo::GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
llvm::StringRef Name = OutName.str();
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (GV == 0 || CGM.AddressPoints[LayoutClass] == 0 || GV->isDeclaration()) {
- VtableBuilder b(RD, LayoutClass, Offset, CGM, GenerateDefinition);
+ if (GV == 0 || CGM.getVtableInfo().AddressPoints[LayoutClass] == 0 ||
+ GV->isDeclaration()) {
+ VtableBuilder b(RD, LayoutClass, Offset, CGM, GenerateDefinition,
+ AddressPoints);
D1(printf("vtable %s\n", RD->getNameAsCString()));
// First comes the vtables for all the non-virtual bases...
@@ -1206,11 +1216,51 @@ class VTTBuilder {
/// BLayout - Layout for the most derived class that this vtable is being
/// built for.
const ASTRecordLayout &BLayout;
- CodeGenModule::AddrMap_t &AddressPoints;
+ CGVtableInfo::AddrMap_t &AddressPoints;
// vtbl - A pointer to the vtable for Class.
llvm::Constant *ClassVtbl;
llvm::LLVMContext &VMContext;
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies;
+
+ bool GenerateDefinition;
+
+ llvm::DenseMap<BaseSubobject, llvm::Constant *> CtorVtables;
+ llvm::DenseMap<std::pair<const CXXRecordDecl *, BaseSubobject>, uint64_t>
+ CtorVtableAddressPoints;
+
+ llvm::Constant *getCtorVtable(const BaseSubobject &Base) {
+ if (!GenerateDefinition)
+ return 0;
+
+ llvm::Constant *&CtorVtable = CtorVtables[Base];
+ if (!CtorVtable) {
+ // Build the vtable.
+ CGVtableInfo::CtorVtableInfo Info
+ = CGM.getVtableInfo().getCtorVtable(Class, Base);
+
+ CtorVtable = Info.Vtable;
+
+ // Add the address points for this base.
+ for (CGVtableInfo::AddressPointsMapTy::const_iterator I =
+ Info.AddressPoints.begin(), E = Info.AddressPoints.end();
+ I != E; ++I) {
+ uint64_t &AddressPoint =
+ CtorVtableAddressPoints[std::make_pair(Base.getBase(), I->first)];
+
+ // Check if we already have the address points for this base.
+ if (AddressPoint)
+ break;
+
+ // Otherwise, insert it.
+ AddressPoint = I->second;
+ }
+ }
+
+ return CtorVtable;
+ }
+
+
+  /// BuildVtablePtr - Build up a reference to the given secondary vtable.
llvm::Constant *BuildVtablePtr(llvm::Constant *Vtable,
const CXXRecordDecl *VtableClass,
@@ -1270,14 +1320,17 @@ class VTTBuilder {
// FIXME: Slightly too many of these for __ZTT8test8_B2
llvm::Constant *init;
if (BaseMorallyVirtual)
- init = BuildVtablePtr(vtbl, VtblClass, RD, Offset);
+ init = GenerateDefinition ?
+ BuildVtablePtr(vtbl, VtblClass, RD, Offset) : 0;
else {
- init = CGM.getVtableInfo().getCtorVtable(Class, Base, BaseOffset);
+ init = GenerateDefinition ?
+ getCtorVtable(BaseSubobject(Base, BaseOffset)) : 0;
subvtbl = init;
subVtblClass = Base;
- init = BuildVtablePtr(init, Class, Base, BaseOffset);
+ init = GenerateDefinition ?
+ BuildVtablePtr(init, Class, Base, BaseOffset) : 0;
}
Inits.push_back(init);
}
@@ -1296,14 +1349,16 @@ class VTTBuilder {
// First comes the primary virtual table pointer...
if (MorallyVirtual) {
- Vtable = ClassVtbl;
+ Vtable = GenerateDefinition ? ClassVtbl : 0;
VtableClass = Class;
} else {
- Vtable = CGM.getVtableInfo().getCtorVtable(Class, RD, Offset);
+ Vtable = GenerateDefinition ?
+ getCtorVtable(BaseSubobject(RD, Offset)) : 0;
VtableClass = RD;
}
- llvm::Constant *Init = BuildVtablePtr(Vtable, VtableClass, RD, Offset);
+ llvm::Constant *Init = GenerateDefinition ?
+ BuildVtablePtr(Vtable, VtableClass, RD, Offset) : 0;
Inits.push_back(Init);
// then the secondary VTTs....
@@ -1326,6 +1381,10 @@ class VTTBuilder {
continue;
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base);
+
+ // Remember the sub-VTT index.
+ SubVTTIndicies[Base] = Inits.size();
+
BuildVTT(Base, BaseOffset, MorallyVirtual);
}
}
@@ -1338,6 +1397,9 @@ class VTTBuilder {
const CXXRecordDecl *Base =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
if (i->isVirtual() && !SeenVBase.count(Base)) {
+ // Remember the sub-VTT index.
+ SubVTTIndicies[Base] = Inits.size();
+
SeenVBase.insert(Base);
uint64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
BuildVTT(Base, BaseOffset, true);
@@ -1348,15 +1410,18 @@ class VTTBuilder {
public:
VTTBuilder(std::vector<llvm::Constant *> &inits, const CXXRecordDecl *c,
- CodeGenModule &cgm)
+ CodeGenModule &cgm, bool GenerateDefinition)
: Inits(inits), Class(c), CGM(cgm),
BLayout(cgm.getContext().getASTRecordLayout(c)),
- AddressPoints(*cgm.AddressPoints[c]),
- VMContext(cgm.getModule().getContext()) {
+ AddressPoints(*cgm.getVtableInfo().AddressPoints[c]),
+ VMContext(cgm.getModule().getContext()),
+ GenerateDefinition(GenerateDefinition) {
// First comes the primary virtual table pointer for the complete class...
ClassVtbl = CGM.getVtableInfo().getVtable(Class);
- Inits.push_back(BuildVtablePtr(ClassVtbl, Class, Class, 0));
+ llvm::Constant *Init = GenerateDefinition ?
+ BuildVtablePtr(ClassVtbl, Class, Class, 0) : 0;
+ Inits.push_back(Init);
// then the secondary VTTs...
SecondaryVTTs(Class);
@@ -1367,11 +1432,16 @@ public:
// and last, the virtual VTTs.
VirtualVTTs(Class);
}
+
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getSubVTTIndicies() {
+ return SubVTTIndicies;
+ }
};
}
llvm::GlobalVariable *
CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+ bool GenerateDefinition,
const CXXRecordDecl *RD) {
// Only classes that have virtual bases need a VTT.
if (RD->getNumVBases() == 0)
@@ -1381,23 +1451,36 @@ CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
CGM.getMangleContext().mangleCXXVTT(RD, OutName);
llvm::StringRef Name = OutName.str();
-
D1(printf("vtt %s\n", RD->getNameAsCString()));
- std::vector<llvm::Constant *> inits;
- VTTBuilder b(inits, RD, CGM);
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV == 0 || GV->isDeclaration()) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- const llvm::ArrayType *Type = llvm::ArrayType::get(Int8PtrTy, inits.size());
-
- llvm::Constant *Init = llvm::ConstantArray::get(Type, inits);
-
- llvm::GlobalVariable *VTT =
- new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
- Linkage, Init, Name);
- CGM.setGlobalVisibility(VTT, RD);
+ std::vector<llvm::Constant *> inits;
+ VTTBuilder b(inits, RD, CGM, GenerateDefinition);
+
+ const llvm::ArrayType *Type = llvm::ArrayType::get(Int8PtrTy, inits.size());
+ llvm::Constant *Init = 0;
+ if (GenerateDefinition)
+ Init = llvm::ConstantArray::get(Type, inits);
+
+ llvm::GlobalVariable *OldGV = GV;
+ GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
+ Linkage, Init, Name);
+ CGM.setGlobalVisibility(GV, RD);
+
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+ }
- return VTT;
+ return GV;
}
void CGVtableInfo::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
@@ -1408,28 +1491,44 @@ void CGVtableInfo::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
return;
}
- Vtable = GenerateVtable(Linkage, /*GenerateDefinition=*/true, RD, RD, 0);
- GenerateVTT(Linkage, RD);
+ AddressPointsMapTy AddressPoints;
+ Vtable = GenerateVtable(Linkage, /*GenerateDefinition=*/true, RD, RD, 0,
+ AddressPoints);
+ GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
}
llvm::GlobalVariable *CGVtableInfo::getVtable(const CXXRecordDecl *RD) {
llvm::GlobalVariable *Vtable = Vtables.lookup(RD);
- if (!Vtable)
+ if (!Vtable) {
+ AddressPointsMapTy AddressPoints;
Vtable = GenerateVtable(llvm::GlobalValue::ExternalLinkage,
- /*GenerateDefinition=*/false, RD, RD, 0);
+ /*GenerateDefinition=*/false, RD, RD, 0,
+ AddressPoints);
+ }
return Vtable;
}
-llvm::GlobalVariable *
-CGVtableInfo::getCtorVtable(const CXXRecordDecl *LayoutClass,
- const CXXRecordDecl *RD, uint64_t Offset) {
- return GenerateVtable(llvm::GlobalValue::InternalLinkage,
- /*GenerateDefinition=*/true,
- LayoutClass, RD, Offset);
+CGVtableInfo::CtorVtableInfo
+CGVtableInfo::getCtorVtable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base) {
+ CtorVtableInfo Info;
+
+ Info.Vtable = GenerateVtable(llvm::GlobalValue::InternalLinkage,
+ /*GenerateDefinition=*/true,
+ RD, Base.getBase(), Base.getBaseOffset(),
+ Info.AddressPoints);
+ return Info;
+}
+
+llvm::GlobalVariable *CGVtableInfo::getVTT(const CXXRecordDecl *RD) {
+ return GenerateVTT(llvm::GlobalValue::ExternalLinkage,
+ /*GenerateDefinition=*/false, RD);
+
}
+
void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const CXXRecordDecl *RD = MD->getParent();
@@ -1445,28 +1544,10 @@ void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) {
// We don't have the right key function.
if (KeyFunction->getCanonicalDecl() != MD->getCanonicalDecl())
return;
-
- // If the key function is a destructor, we only want to emit the vtable
- // once, so do it for the complete destructor.
- if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Complete)
- return;
- } else {
- // If there is no key function, we only want to emit the vtable if we are
- // emitting a constructor.
- if (!isa<CXXConstructorDecl>(MD) || GD.getCtorType() != Ctor_Complete)
- return;
}
- llvm::GlobalVariable::LinkageTypes Linkage;
- if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
- Linkage = llvm::GlobalVariable::InternalLinkage;
- else if (KeyFunction && !MD->isInlined())
- Linkage = llvm::GlobalVariable::ExternalLinkage;
- else
- Linkage = llvm::GlobalVariable::WeakODRLinkage;
-
// Emit the data.
- GenerateClassData(Linkage, RD);
+ GenerateClassData(CGM.getVtableLinkage(RD), RD);
for (CXXRecordDecl::method_iterator i = RD->method_begin(),
e = RD->method_end(); i != e; ++i) {
@@ -1481,3 +1562,47 @@ void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) {
}
}
+bool CGVtableInfo::needsVTTParameter(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  // We don't have any virtual bases; just return early.
+ if (!MD->getParent()->getNumVBases())
+ return false;
+
+ // Check if we have a base constructor.
+ if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+ return true;
+
+ // Check if we have a base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return true;
+
+ return false;
+}
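An illustrative sketch (not part of the patch) of when needsVTTParameter fires:

    struct A { virtual ~A(); };
    struct B : virtual A { B(); };  // B has a virtual base: B's base-object
                                    // constructor/destructor variants take a VTT
    struct C : B { C(); };          // C inherits the virtual base, so its base
                                    // variants take a VTT as well
    struct D { virtual ~D(); };     // no virtual bases: no VTT parameter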
+
+uint64_t CGVtableInfo::getSubVTTIndex(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Base) {
+ ClassPairTy ClassPair(RD, Base);
+
+ SubVTTIndiciesTy::iterator I =
+ SubVTTIndicies.find(ClassPair);
+ if (I != SubVTTIndicies.end())
+ return I->second;
+
+ std::vector<llvm::Constant *> inits;
+ VTTBuilder Builder(inits, RD, CGM, /*GenerateDefinition=*/false);
+
+ for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+ Builder.getSubVTTIndicies().begin(),
+ E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ // Insert all indices.
+ ClassPairTy ClassPair(RD, I->first);
+
+ SubVTTIndicies.insert(std::make_pair(ClassPair, I->second));
+ }
+
+ I = SubVTTIndicies.find(ClassPair);
+ assert(I != SubVTTIndicies.end() && "Did not find index!");
+
+ return I->second;
+}
diff --git a/lib/CodeGen/CGVtable.h b/lib/CodeGen/CGVtable.h
index eed5b64..471d638 100644
--- a/lib/CodeGen/CGVtable.h
+++ b/lib/CodeGen/CGVtable.h
@@ -61,11 +61,83 @@ public:
ThunkAdjustment ReturnAdjustment;
};
+/// BaseSubobject - Uniquely identifies a direct or indirect base class.
+/// Stores both the base class decl and the offset from the most derived class
+/// to the base class.
+class BaseSubobject {
+ /// Base - The base class declaration.
+ const CXXRecordDecl *Base;
+
+ /// BaseOffset - The offset from the most derived class to the base class.
+ uint64_t BaseOffset;
+
+public:
+ BaseSubobject(const CXXRecordDecl *Base, uint64_t BaseOffset)
+ : Base(Base), BaseOffset(BaseOffset) { }
+
+ /// getBase - Returns the base class declaration.
+ const CXXRecordDecl *getBase() const { return Base; }
+
+ /// getBaseOffset - Returns the base class offset.
+ uint64_t getBaseOffset() const { return BaseOffset; }
+
+ friend bool operator==(const BaseSubobject &LHS, const BaseSubobject &RHS) {
+ return LHS.Base == RHS.Base && LHS.BaseOffset == RHS.BaseOffset;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+namespace llvm {
+
+template<> struct DenseMapInfo<clang::CodeGen::BaseSubobject> {
+ static clang::CodeGen::BaseSubobject getEmptyKey() {
+ return clang::CodeGen::BaseSubobject(
+ DenseMapInfo<const clang::CXXRecordDecl *>::getEmptyKey(),
+ DenseMapInfo<uint64_t>::getEmptyKey());
+ }
+
+ static clang::CodeGen::BaseSubobject getTombstoneKey() {
+ return clang::CodeGen::BaseSubobject(
+ DenseMapInfo<const clang::CXXRecordDecl *>::getTombstoneKey(),
+ DenseMapInfo<uint64_t>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const clang::CodeGen::BaseSubobject &Base) {
+ return
+ DenseMapInfo<const clang::CXXRecordDecl *>::getHashValue(Base.getBase()) ^
+ DenseMapInfo<uint64_t>::getHashValue(Base.getBaseOffset());
+ }
+
+ static bool isEqual(const clang::CodeGen::BaseSubobject &LHS,
+ const clang::CodeGen::BaseSubobject &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// It's OK to treat BaseSubobject as a POD type.
+template <> struct isPodLike<clang::CodeGen::BaseSubobject> {
+ static const bool value = true;
+};
+
+}
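With these specializations in place, BaseSubobject works directly as a DenseMap key. A minimal usage sketch (RD is a hypothetical const CXXRecordDecl pointer and 2 a made-up address point):

    llvm::DenseMap<clang::CodeGen::BaseSubobject, uint64_t> Points;
    Points[clang::CodeGen::BaseSubobject(RD, /*BaseOffset=*/0)] = 2;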
+
+namespace clang {
+namespace CodeGen {
+
class CGVtableInfo {
public:
typedef std::vector<std::pair<GlobalDecl, ThunkAdjustment> >
AdjustmentVectorTy;
+ typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t;
+ typedef llvm::DenseMap<CtorVtable_t, int64_t> AddrSubMap_t;
+ typedef llvm::DenseMap<const CXXRecordDecl *, AddrSubMap_t *> AddrMap_t;
+ llvm::DenseMap<const CXXRecordDecl *, AddrMap_t*> AddressPoints;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
+
private:
CodeGenModule &CGM;
@@ -93,6 +165,9 @@ private:
SavedAdjustmentsTy SavedAdjustments;
llvm::DenseSet<const CXXRecordDecl*> SavedAdjustmentRecords;
+ typedef llvm::DenseMap<ClassPairTy, uint64_t> SubVTTIndiciesTy;
+ SubVTTIndiciesTy SubVTTIndicies;
+
/// getNumVirtualFunctionPointers - Return the number of virtual function
/// pointers in the vtable for a given record decl.
uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
@@ -110,15 +185,26 @@ private:
llvm::GlobalVariable *
GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
bool GenerateDefinition, const CXXRecordDecl *LayoutClass,
- const CXXRecordDecl *RD, uint64_t Offset);
+ const CXXRecordDecl *RD, uint64_t Offset,
+ AddressPointsMapTy& AddressPoints);
llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+ bool GenerateDefinition,
const CXXRecordDecl *RD);
public:
CGVtableInfo(CodeGenModule &CGM)
: CGM(CGM) { }
+ /// needsVTTParameter - Return whether the given global decl needs a VTT
+ /// parameter, which it does if it's a base constructor or destructor with
+ /// virtual bases.
+ static bool needsVTTParameter(GlobalDecl GD);
+
+ /// getSubVTTIndex - Return the index of the sub-VTT within RD's VTT that
+ /// corresponds to the given base class.
+ uint64_t getSubVTTIndex(const CXXRecordDecl *RD, const CXXRecordDecl *Base);
+
/// getMethodVtableIndex - Return the index (relative to the vtable address
/// point) where the function pointer for the given virtual function is
/// stored.
@@ -140,14 +226,26 @@ public:
uint64_t getVtableAddressPoint(const CXXRecordDecl *RD);
llvm::GlobalVariable *getVtable(const CXXRecordDecl *RD);
- llvm::GlobalVariable *getCtorVtable(const CXXRecordDecl *RD,
- const CXXRecordDecl *Class,
- uint64_t Offset);
+ /// CtorVtableInfo - Information about a constructor vtable.
+ struct CtorVtableInfo {
+ /// Vtable - The vtable itself.
+ llvm::GlobalVariable *Vtable;
+
+ /// AddressPoints - The address points in this constructor vtable.
+ AddressPointsMapTy AddressPoints;
+
+ CtorVtableInfo() : Vtable(0) { }
+ };
+
+ CtorVtableInfo getCtorVtable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base);
+
+ llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
void MaybeEmitVtable(GlobalDecl GD);
};
-}
-}
+} // end namespace CodeGen
+} // end namespace clang
#endif
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 3c26484..45469d3 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -29,5 +29,5 @@ add_clang_library(clangCodeGen
CodeGenTypes.cpp
Mangle.cpp
ModuleBuilder.cpp
- TargetABIInfo.cpp
+ TargetInfo.cpp
)
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index f904f04..f0a5c64 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -190,15 +190,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0);
// Emit subprogram debug descriptor.
- // FIXME: The cast here is a huge hack.
if (CGDebugInfo *DI = getDebugInfo()) {
DI->setLocation(StartLoc);
- if (isa<FunctionDecl>(D)) {
- DI->EmitFunctionStart(CGM.getMangledName(GD), FnType, CurFn, Builder);
- } else {
- // Just use LLVM function name.
- DI->EmitFunctionStart(Fn->getName(), FnType, CurFn, Builder);
- }
+ DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
// FIXME: Leaked.
@@ -230,26 +224,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
}
}
-static bool NeedsVTTParameter(GlobalDecl GD) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- // We don't have any virtual bases, just return early.
- if (!MD->getParent()->getNumVBases())
- return false;
-
- // Check if we have a base constructor.
- if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
- return true;
-
- // Check if we have a base destructor.
- if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
- return true;
-
- return false;
-}
-
-void CodeGenFunction::GenerateCode(GlobalDecl GD,
- llvm::Function *Fn) {
+void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// Check if we should generate debug info for this function.
@@ -271,7 +246,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD,
Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
// Check if we need a VTT parameter as well.
- if (NeedsVTTParameter(GD)) {
+ if (CGVtableInfo::needsVTTParameter(GD)) {
// FIXME: The comment about using a fake decl above applies here too.
QualType T = getContext().getPointerType(getContext().VoidPtrTy);
CXXVTTDecl =
@@ -597,7 +572,7 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
ElemSize = EmitVLASize(ElemTy);
else
ElemSize = llvm::ConstantInt::get(SizeTy,
- getContext().getTypeSize(ElemTy) / 8);
+ getContext().getTypeSizeInChars(ElemTy).getQuantity());
llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 273ddca..30ad663 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -17,6 +17,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/CharUnits.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -463,7 +464,7 @@ public:
llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
llvm::Constant *BuildDescriptorBlockDecl(bool BlockHasCopyDispose,
- uint64_t Size,
+ CharUnits Size,
const llvm::StructType *,
std::vector<HelperInfo> *);
@@ -471,14 +472,14 @@ public:
const BlockInfo& Info,
const Decl *OuterFuncDecl,
llvm::DenseMap<const Decl*, llvm::Value*> ldm,
- uint64_t &Size, uint64_t &Align,
+ CharUnits &Size, uint64_t &Align,
llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
bool &subBlockHasCopyDispose);
void BlockForwardSelf();
llvm::Value *LoadBlockStruct();
- uint64_t AllocateBlockDecl(const BlockDeclRefExpr *E);
+ CharUnits AllocateBlockDecl(const BlockDeclRefExpr *E);
llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E);
const llvm::Type *BuildByRefType(const ValueDecl *D);
@@ -517,7 +518,7 @@ public:
void InitializeVtablePtrsRecursive(const CXXRecordDecl *ClassDecl,
llvm::Constant *Vtable,
- CodeGenModule::AddrSubMap_t& AddressPoints,
+ CGVtableInfo::AddrSubMap_t& AddressPoints,
llvm::Value *ThisPtr,
uint64_t Offset);
@@ -728,6 +729,10 @@ public:
/// generating code for an C++ member function.
llvm::Value *LoadCXXThis();
+ /// LoadCXXVTT - Load the VTT parameter passed to base
+ /// constructors/destructors that have virtual bases.
+ llvm::Value *LoadCXXVTT();
+
/// GetAddressOfBaseClass - This function will add the necessary delta to the
/// load of 'this' and returns address of the base class.
// FIXME. This currently only does a derived to non-virtual base conversion.
@@ -794,7 +799,7 @@ public:
llvm::Value *NumElements,
llvm::Value *This);
- llvm::Constant * GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ llvm::Constant *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
const ArrayType *Array,
llvm::Value *This);
@@ -815,6 +820,10 @@ public:
void EmitCheck(llvm::Value *, unsigned Size);
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
//===--------------------------------------------------------------------===//
// Declaration Emission
//===--------------------------------------------------------------------===//
@@ -1057,6 +1066,7 @@ public:
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
llvm::Value *This,
+ llvm::Value *VTT,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd);
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
@@ -1081,10 +1091,6 @@ public:
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
- llvm::Value *EmitShuffleVector(llvm::Value* V1, llvm::Value *V2, ...);
- llvm::Value *EmitVector(llvm::Value * const *Vals, unsigned NumVals,
- bool isSplat = false);
-
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
@@ -1184,9 +1190,11 @@ public:
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- const VarDecl **Decls,
+ llvm::Constant **Decls,
unsigned NumDecls);
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D);
+
void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index d497471..5ecc30e 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -17,6 +17,7 @@
#include "CGCall.h"
#include "CGObjCRuntime.h"
#include "Mangle.h"
+#include "TargetInfo.h"
#include "clang/CodeGen/CodeGenOptions.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -42,8 +43,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Diagnostic &diags)
: BlockModule(C, M, TD, Types, *this), Context(C),
Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
- TheTargetData(TD), Diags(diags), Types(C, M, TD), MangleCtx(C),
- VtableInfo(*this), Runtime(0),
+ TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
+ Types(C, M, TD, getTargetCodeGenInfo().getABIInfo()),
+ MangleCtx(C), VtableInfo(*this), Runtime(0),
MemCpyFn(0), MemMoveFn(0), MemSetFn(0), CFConstantStringClassRef(0),
VMContext(M.getContext()) {
@@ -66,10 +68,8 @@ CodeGenModule::~CodeGenModule() {
}
void CodeGenModule::Release() {
- // We need to call this first because it can add deferred declarations.
- EmitCXXGlobalInitFunc();
-
EmitDeferred();
+ EmitCXXGlobalInitFunc();
if (Runtime)
if (llvm::Function *ObjCInitFunction = Runtime->ModuleInitFunction())
AddGlobalCtor(ObjCInitFunction);
@@ -378,6 +378,8 @@ void CodeGenModule::SetCommonAttributes(const Decl *D,
if (const SectionAttr *SA = D->getAttr<SectionAttr>())
GV->setSection(SA->getName());
+
+ getTargetCodeGenInfo().SetTargetAttributes(D, GV, *this);
}
void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
@@ -537,7 +539,8 @@ bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
const CXXRecordDecl *RD = MD->getParent();
if (MD->isOutOfLine() && RD->isDynamicClass()) {
const CXXMethodDecl *KeyFunction = getContext().getKeyFunction(RD);
- if (KeyFunction == MD->getCanonicalDecl())
+ if (KeyFunction &&
+ KeyFunction->getCanonicalDecl() == MD->getCanonicalDecl())
return false;
}
}
@@ -876,6 +879,57 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
EmitGlobalVarDefinition(D);
}
+llvm::GlobalVariable::LinkageTypes
+CodeGenModule::getVtableLinkage(const CXXRecordDecl *RD) {
+ if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
+ return llvm::GlobalVariable::InternalLinkage;
+
+ if (const CXXMethodDecl *KeyFunction
+ = RD->getASTContext().getKeyFunction(RD)) {
+ // If this class has a key function, use that to determine the linkage of
+ // the vtable.
+ const FunctionDecl *Def = 0;
+ if (KeyFunction->getBody(Def))
+ KeyFunction = cast<CXXMethodDecl>(Def);
+
+ switch (KeyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ if (KeyFunction->isInlined())
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ return llvm::GlobalVariable::ExternalLinkage;
+
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ return llvm::GlobalVariable::WeakODRLinkage;
+ }
+ }
+
+ switch (RD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ return llvm::GlobalVariable::WeakODRLinkage;
+ }
+
+ // Silence GCC warning.
+ return llvm::GlobalVariable::WeakODRLinkage;
+}
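A sketch (hand-derived, not from the patch) of how these rules classify a few classes:

    struct A { virtual void f(); };    // key function f, not inline:
    void A::f() {}                     //   vtable gets ExternalLinkage, emitted
                                       //   in the TU that defines f
    struct B { virtual void g() {} };  // every virtual is inline: no key
                                       //   function, vtable is WeakODRLinkage
    namespace { struct C { virtual void h(); }; }
                                       // anonymous namespace: InternalLinkage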
+
static CodeGenModule::GVALinkage
GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
// Everything located semantically within an anonymous namespace is
@@ -909,6 +963,7 @@ GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = 0;
QualType ASTTy = D->getType();
+ bool NonConstInit = false;
if (D->getInit() == 0) {
// This is a tentative definition; tentative definitions are
@@ -928,8 +983,9 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
if (!Init) {
QualType T = D->getInit()->getType();
if (getLangOptions().CPlusPlus) {
- CXXGlobalInits.push_back(D);
+ EmitCXXGlobalVarDeclInitFunc(D);
Init = EmitNullConstant(T);
+ NonConstInit = true;
} else {
ErrorUnsupported(D, "static initializer");
Init = llvm::UndefValue::get(getTypes().ConvertType(T));
@@ -990,7 +1046,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
// If it is safe to mark the global 'constant', do so now.
GV->setConstant(false);
- if (DeclIsConstantGlobal(Context, D))
+ if (!NonConstInit && DeclIsConstantGlobal(Context, D))
GV->setConstant(true);
GV->setAlignment(getContext().getDeclAlignInBytes(D));
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 939c66c..c7aa7a4 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -43,6 +43,7 @@ namespace llvm {
}
namespace clang {
+ class TargetCodeGenInfo;
class ASTContext;
class FunctionDecl;
class IdentifierInfo;
@@ -85,6 +86,7 @@ class CodeGenModule : public BlockModule {
const CodeGenOptions &CodeGenOpts;
llvm::Module &TheModule;
const llvm::TargetData &TheTargetData;
+ mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
Diagnostic &Diags;
CodeGenTypes Types;
MangleContext MangleCtx;
@@ -153,7 +155,7 @@ class CodeGenModule : public BlockModule {
/// CXXGlobalInits - Variables with global initializers that need to run
/// before main.
- std::vector<const VarDecl*> CXXGlobalInits;
+ std::vector<llvm::Constant*> CXXGlobalInits;
/// CFConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
@@ -191,6 +193,7 @@ public:
Diagnostic &getDiags() const { return Diags; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
+ const TargetCodeGenInfo &getTargetCodeGenInfo() const;
/// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;
@@ -232,11 +235,6 @@ public:
BuildCovariantThunk(const GlobalDecl &GD, bool Extern,
const CovariantThunkAdjustment &Adjustment);
- typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t;
- typedef llvm::DenseMap<CtorVtable_t, int64_t> AddrSubMap_t;
- typedef llvm::DenseMap<const CXXRecordDecl *, AddrSubMap_t *> AddrMap_t;
- llvm::DenseMap<const CXXRecordDecl *, AddrMap_t*> AddressPoints;
-
/// GetCXXBaseClassOffset - Returns the offset from a derived class to its
/// base class. Returns null if the offset is 0.
llvm::Constant *GetCXXBaseClassOffset(const CXXRecordDecl *ClassDecl,
@@ -412,6 +410,11 @@ public:
GVA_TemplateInstantiation
};
+ /// getVtableLinkage - Return the appropriate linkage for the vtable, VTT,
+ /// and type information of the given class.
+ static llvm::GlobalVariable::LinkageTypes
+ getVtableLinkage(const CXXRecordDecl *RD);
+
private:
/// UniqueMangledName - Unique a name by (if necessary) inserting it into the
/// MangledNames string map.
@@ -475,7 +478,9 @@ private:
/// EmitCXXGlobalInitFunc - Emit a function that initializes C++ globals.
void EmitCXXGlobalInitFunc();
-
+
+ void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D);
+
// FIXME: Hardcoding priority here is gross.
void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index cd34e0c..838f62a 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -28,9 +28,9 @@ using namespace clang;
using namespace CodeGen;
CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
- const llvm::TargetData &TD)
+ const llvm::TargetData &TD, const ABIInfo &Info)
: Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
- TheABIInfo(0) {
+ TheABIInfo(Info) {
}
CodeGenTypes::~CodeGenTypes() {
@@ -38,13 +38,10 @@ CodeGenTypes::~CodeGenTypes() {
I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
I != E; ++I)
delete I->second;
- {
- llvm::FoldingSet<CGFunctionInfo>::iterator
- I = FunctionInfos.begin(), E = FunctionInfos.end();
- while (I != E)
- delete &*I++;
- }
- delete TheABIInfo;
+
+ for (llvm::FoldingSet<CGFunctionInfo>::iterator
+ I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
+ delete &*I++;
}
/// ConvertType - Convert the specified type to its LLVM form.
@@ -56,9 +53,8 @@ const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// circular types. Loop through all these deferred pointees, if any, and
// resolve them now.
while (!PointersToResolve.empty()) {
- std::pair<QualType, llvm::OpaqueType*> P =
- PointersToResolve.back();
- PointersToResolve.pop_back();
+ std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();
+
// We can handle bare pointers here because we know that the only pointers
// to the Opaque type are P.second and from other types. Refining the
// opaque type away will invalidate P.second, but we don't mind :).
@@ -88,9 +84,10 @@ const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
const llvm::Type *ResultType = ConvertTypeRecursive(T);
- if (ResultType == llvm::Type::getInt1Ty(getLLVMContext()))
+ if (ResultType->isInteger(1))
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
+ // FIXME: Should assert that the llvm type and AST type have the same size.
return ResultType;
}
@@ -102,7 +99,7 @@ const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
const llvm::Type *R = ConvertType(T);
// If this is a non-bool type, don't map it.
- if (R != llvm::Type::getInt1Ty(getLLVMContext()))
+ if (!R->isInteger(1))
return R;
// Otherwise, return an integer of the target-specified size.
@@ -384,11 +381,10 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
const llvm::Type *PtrDiffTy =
ConvertTypeRecursive(Context.getPointerDiffType());
- if (ETy->isFunctionType()) {
+ if (ETy->isFunctionType())
return llvm::StructType::get(TheModule.getContext(), PtrDiffTy, PtrDiffTy,
NULL);
- } else
- return PtrDiffTy;
+ return PtrDiffTy;
}
case Type::TemplateSpecialization:
@@ -436,10 +432,8 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
// Okay, this is a definition of a type. Compile the implementation now.
- if (TD->isEnum()) {
- // Don't bother storing enums in TagDeclTypes.
+ if (TD->isEnum()) // Don't bother storing enums in TagDeclTypes.
return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
- }
// This decl could well be recursive. In this case, insert an opaque
// definition of this type, which the recursive uses will get. We will then
@@ -450,15 +444,13 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
- const llvm::Type *ResultType;
const RecordDecl *RD = cast<const RecordDecl>(TD);
// Layout fields.
- CGRecordLayout *Layout =
- CGRecordLayoutBuilder::ComputeLayout(*this, RD);
+ CGRecordLayout *Layout = CGRecordLayoutBuilder::ComputeLayout(*this, RD);
CGRecordLayouts[Key] = Layout;
- ResultType = Layout->getLLVMType();
+ const llvm::Type *ResultType = Layout->getLLVMType();
// Refine our Opaque type to ResultType. This can invalidate ResultType, so
// make sure to read the result out of the holder.
@@ -500,8 +492,7 @@ void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned FieldNo,
/// getCGRecordLayout - Return record layout info for the given llvm::Type.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
- const Type *Key =
- Context.getTagDeclType(TD).getTypePtr();
+ const Type *Key = Context.getTagDeclType(TD).getTypePtr();
llvm::DenseMap<const Type*, CGRecordLayout *>::const_iterator I
= CGRecordLayouts.find(Key);
assert (I != CGRecordLayouts.end()
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 2ff602f..7e34252 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -85,7 +85,7 @@ class CodeGenTypes {
const TargetInfo &Target;
llvm::Module& TheModule;
const llvm::TargetData& TheTargetData;
- mutable const ABIInfo* TheABIInfo;
+ const ABIInfo& TheABIInfo;
llvm::SmallVector<std::pair<QualType,
llvm::OpaqueType *>, 8> PointersToResolve;
@@ -140,13 +140,14 @@ private:
/// interface to convert type T into a llvm::Type.
const llvm::Type *ConvertNewType(QualType T);
public:
- CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD);
+ CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
+ const ABIInfo &Info);
~CodeGenTypes();
const llvm::TargetData &getTargetData() const { return TheTargetData; }
const TargetInfo &getTarget() const { return Target; }
ASTContext &getContext() const { return Context; }
- const ABIInfo &getABIInfo() const;
+ const ABIInfo &getABIInfo() const { return TheABIInfo; }
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
/// ConvertType - Convert type T into a llvm::Type.
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index 10fd1f5..d873cfe 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -199,10 +199,13 @@ void CXXNameMangler::mangle(const NamedDecl *D, llvm::StringRef Prefix) {
return;
}
- // <mangled-name> ::= _Z <encoding>
+ // <mangled-name> ::= _Z [L] <encoding>
// ::= <data name>
// ::= <special-name>
Out << Prefix;
+ if (D->getLinkage() == NamedDecl::InternalLinkage) // match gcc behavior
+ Out << 'L';
+
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
mangleFunctionEncoding(FD);
else
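The effect of the new 'L' prefix on a couple of declarations (illustrative; names are made up):

    static void tick(int) {}   // internal linkage: mangles as _ZL4ticki
    void tock(int) {}          // external linkage: mangles as _Z4tocki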
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
new file mode 100644
index 0000000..e5fd47e
--- /dev/null
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -0,0 +1,1904 @@
+//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace CodeGen;
+
+ABIInfo::~ABIInfo() {}
+
+void ABIArgInfo::dump() const {
+ llvm::raw_ostream &OS = llvm::errs();
+ OS << "(ABIArgInfo Kind=";
+ switch (TheKind) {
+ case Direct:
+ OS << "Direct";
+ break;
+ case Extend:
+ OS << "Extend";
+ break;
+ case Ignore:
+ OS << "Ignore";
+ break;
+ case Coerce:
+ OS << "Coerce Type=";
+ getCoerceToType()->print(OS);
+ break;
+ case Indirect:
+ OS << "Indirect Align=" << getIndirectAlign();
+ break;
+ case Expand:
+ OS << "Expand";
+ break;
+ }
+ OS << ")\n";
+}
+
+TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it
+/// is an unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
+ bool AllowArrays) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+
+ // Constant arrays of empty records count as empty, strip them off.
+ if (AllowArrays)
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+ FT = AT->getElementType();
+
+ return isEmptyRecord(Context, FT, AllowArrays);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i)
+ if (!isEmptyField(Context, *i, AllowArrays))
+ return false;
+ return true;
+}
+
+/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
+/// a non-trivial destructor or a non-trivial copy constructor.
+static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+
+ return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
+}
+
+/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
+/// a record type with either a non-trivial destructor or a non-trivial copy
+/// constructor.
+static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ return hasNonTrivialDestructorOrCopyConstructor(RT);
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The type of the single non-empty field, if it exists;
+/// null otherwise.
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAsStructureType();
+ if (!RT)
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return 0;
+
+ const Type *Found = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return 0;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ return Found;
+}
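A few examples of how this classifies (sketch, not from the patch):

    struct S1 { float f; };           // single-element struct; element is float
    struct S2 { S1 inner[1]; };       // one-element arrays unwrap: still float
    struct S3 { float f; float g; };  // two non-empty fields: not single-element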
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ if (!Ty->getAs<BuiltinType>() && !Ty->isAnyPointerType() &&
+ !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
+ !Ty->isBlockPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+/// canExpandIndirectArgument - Test whether an argument type which is to be
+/// passed indirectly (on the stack) would have the equivalent layout if it was
+/// expanded into separate arguments. If so, we prefer to do the latter to avoid
+/// inhibiting optimizations.
+///
+// FIXME: This predicate is missing many cases, currently it just follows
+// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
+// should probably make this smarter, or better yet make the LLVM backend
+// capable of handling it.
+static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
+ // We can only expand structure types.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // We can only expand (C) structures.
+ //
+ // FIXME: This needs to be generalized to handle classes as well.
+ const RecordDecl *RD = RT->getDecl();
+ if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+ // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
+ // how to expand them yet, and the predicate for telling if a bitfield still
+ // counts as "basic" is more complicated than what we were doing previously.
+ if (FD->isBitField())
+ return false;
+ }
+
+ return true;
+}
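For example (illustrative), a struct of 32-bit scalars qualifies while a bit-field disqualifies:

    struct Point { int x; int y; };      // expandable: passed as two i32 args
    struct Flags { int a : 3; int b; };  // bit-field present: stays indirect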
+
+static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (FD->getType()->isVectorType() &&
+ Context.getTypeSize(FD->getType()) >= 128)
+ return true;
+
+ if (const RecordType* RT = FD->getType()->getAs<RecordType>())
+ if (typeContainsSSEVector(RT->getDecl(), Context))
+ return true;
+ }
+
+ return false;
+}
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ DefaultTargetCodeGenInfo() : TargetCodeGenInfo(new DefaultABIInfo()) {}
+};
+
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ ASTContext &Context;
+ bool IsDarwinVectorABI;
+ bool IsSmallStructInRegABI;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+
+ static unsigned getIndirectArgumentAlignment(QualType Ty,
+ ASTContext &Context);
+
+public:
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ X86_32ABIInfo(ASTContext &Context, bool d, bool p)
+ : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
+ IsSmallStructInRegABI(p) {}
+};
+
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_32TargetCodeGenInfo(ASTContext &Context, bool d, bool p)
+ : TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {}
+};
+
+}
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Type must be register sized.
+ if (!isRegisterSize(Size))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128-bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, enum, or complex type, it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isAnyPointerType() ||
+ Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+ Ty->isBlockPointerType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Structure types are returned in a register if all of their fields
+ // would themselves be returned in a register.
+ for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
+ e = RT->getDecl()->field_end(); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ return false;
+ }
+
+ return true;
+}
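Sketch of the predicate on a few types (hand-derived, not from the patch):

    struct RGBA { char r, g, b, a; };  // 32 bits of scalar fields: in register
    struct Big { int a, b, c; };       // 96 bits: not register sized, memory
    typedef int v2si __attribute__((vector_size(8)));
    struct HasVec { v2si v; };         // 64-bit vector inside a struct: memory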
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getCoerce(llvm::VectorType::get(
+ llvm::Type::getInt64Ty(VMContext), 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return ABIArgInfo::getDirect();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ if (const RecordType *RT = RetTy->getAsStructureType()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Structures with flexible arrays are always indirect.
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // If specified, structs and unions are always indirect.
+ if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
+ return ABIArgInfo::getIndirect(0);
+
+ // Classify "single element" structs as their element type.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
+ if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
+ if (BT->isIntegerType()) {
+ // We need to use the size of the structure; padding
+ // bit-fields can make it larger than the single
+ // element type.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(
+ llvm::IntegerType::get(VMContext, (unsigned) Size));
+ } else if (BT->getKind() == BuiltinType::Float) {
+ assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ "Unexpect single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
+ } else if (BT->getKind() == BuiltinType::Double) {
+ assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ "Unexpect single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
+ }
+ } else if (SeltTy->isPointerType()) {
+ // FIXME: It would be really nice if this could come out as the proper
+ // pointer type.
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ return ABIArgInfo::getCoerce(PtrTy);
+ } else if (SeltTy->isVectorType()) {
+ // 64- and 128-bit vectors are never returned in a
+ // register when inside a structure.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size == 64 || Size == 128)
+ return ABIArgInfo::getIndirect(0);
+
+ return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
+ }
+ }
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
+ ASTContext &Context) {
+ unsigned Align = Context.getTypeAlign(Ty);
+ if (Align < 128) return 0;
+ if (const RecordType* RT = Ty->getAs<RecordType>())
+ if (typeContainsSSEVector(RT->getDecl(), Context))
+ return 16;
+ return 0;
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ // FIXME: Set alignment on indirect arguments.
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = Ty->getAsStructureType())
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
+ Context));
+
+ // Ignore empty structs.
+ if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
+ return ABIArgInfo::getIgnore();
+
+ // Expand small (<= 128-bit) record types when we know that the stack layout
+ // of those arguments will match the struct. This is important because the
+ // LLVM backend isn't smart enough to remove byval, which inhibits many
+ // optimizations.
+ if (Context.getTypeSize(Ty) <= 4*32 &&
+ canExpandIndirectArgument(Ty, Context))
+ return ABIArgInfo::getExpand();
+
+ return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
+ } else {
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
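Rough C equivalent of the code this emits (a sketch; T stands for the type being read):

    char *cur = *ap;                               // load ap.cur
    T *addr = (T *)cur;                            // value lives at the current slot
    *ap = cur + ((sizeof(T) + 3) & ~(size_t)3);    // ap.next: size rounded up to 4
    return addr;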
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ Class merge(Class Accum, Class Field) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+ /// containing object. Some parameters are classified differently
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
+ void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const;
+
+ /// getCoerceResult - Given a source type \arg Ty and an LLVM type
+ /// to coerce to, choose the best way to pass Ty in the same place
+ /// that \arg CoerceTo would be passed, but while keeping the
+ /// emitted code as simple as possible.
+ ///
+ /// FIXME: Note, this should be cleaned up to just take an enumeration of all
+ /// the ways we might want to pass things, instead of constructing an LLVM
+ /// type. This makes this code more explicit, and it makes it clearer that we
+ /// are also doing this for correctness in the case of passing scalar types.
+ ABIArgInfo getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext,
+ unsigned &neededInt,
+ unsigned &neededSSE) const;
+
+public:
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_64TargetCodeGenInfo() : TargetCodeGenInfo(new X86_64ABIInfo()) {}
+};
+
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
+ Class Field) const {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ else if (Field == Memory)
+ return Memory;
+ else if (Accum == NoClass)
+ return Field;
+ else if (Accum == Integer || Field == Integer)
+ return Integer;
+ else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ else
+ return SSE;
+}
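A few concrete merges under rules (a)-(f), worked by hand from the code above:

    merge(NoClass, Integer) == Integer   // rule (b)
    merge(SSE, Integer)     == Integer   // rule (d)
    merge(SSE, SSE)         == SSE       // rule (a)
    merge(SSE, X87)         == Memory    // rule (e)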
+
+void X86_64ABIInfo::classify(QualType Ty,
+ ASTContext &Context,
+ uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ Lo = X87;
+ Hi = X87Up;
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ } else if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
+ } else if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = Context.getTypeSize(VT);
+ if (Size == 32) {
+ // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+ // float> as integer.
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+ if (EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128) {
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType ET = Context.getCanonicalType(CT->getElementType());
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ if (ET->isIntegralType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == Context.FloatTy)
+ Current = SSE;
+ else if (ET == Context.DoubleTy)
+ Lo = Hi = SSE;
+ else if (ET == Context.LongDoubleTy)
+ Current = ComplexX87;
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = Context.getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // Do post merger cleanup (see below). Only case we worry about is Memory.
+ if (Hi == Memory)
+ Lo = Memory;
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+ // copy constructor or a non-trivial destructor, it is passed by invisible
+ // reference.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+
+ // If this is a C++ record, classify the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+ // single eightbyte, each is classified separately. Each eightbyte gets
+ // initialized to class NO_CLASS.
+ Class FieldLo, FieldHi;
+ uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
+ classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // If this record has no fields but isn't empty, classify as INTEGER.
+ if (RD->field_empty() && Size)
+ Current = Integer;
+ }
+
+ // Classify the fields one at a time, merging the results.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
+ Lo = Memory;
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+ FieldLo = FieldHi = NoClass;
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is MEMORY, the whole argument is
+ // passed in memory.
+ //
+    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
+
+ // The first of these conditions is guaranteed by how we implement
+ // the merge (just bail).
+ //
+ // The second condition occurs in the case of unions; for example
+ // union { _Complex double; unsigned; }.
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+ }
+}
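+
+// Worked example (illustrative, hypothetical type): for
+//
+//   struct S { double d; long l; };   // 16 bytes, two eightbytes
+//
+// the algorithm above produces Lo = SSE (for d) and Hi = Integer (for l),
+// so S is passed in one XMM register and one general purpose register.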
+
+ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const {
+ if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
+ // Integer and pointer types will end up in a general purpose
+ // register.
+ if (Ty->isIntegralType() || Ty->hasPointerRepresentation())
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
+ // FIXME: It would probably be better to make CGFunctionInfo only map using
+    // canonical types than to canonicalize here.
+ QualType CTy = Context.getCanonicalType(Ty);
+
+ // Float and double end up in a single SSE reg.
+ if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
+ return ABIArgInfo::getDirect();
+
+ }
+
+ return ABIArgInfo::getCoerce(CoerceTo);
+}
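+
+// Illustrative examples for the coercion above: a hypothetical
+// 'struct { int a, b; }' classified to i64 is neither integral nor a
+// pointer, so it stays getCoerce(i64); a plain 'int' against the same i64
+// target returns getDirect(), and a 'float' against double likewise goes
+// direct rather than being coerced.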
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ ASTContext &Context) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty))
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+
+ bool ByVal = !isRecordWithNonTrivialDestructorOrCopyConstructor(Ty);
+
+ // FIXME: Set alignment correctly.
+ return ABIArgInfo::getIndirect(0, ByVal);
+}
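+
+// Illustrative example: a hypothetical POD 'struct Big { char buf[32]; }'
+// is passed as getIndirect(0, /*ByVal=*/true), whereas a C++ class with a
+// non-trivial copy constructor or destructor gets ByVal == false and is
+// passed as a plain pointer, i.e. by invisible reference.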
+
+ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectResult(RetTy, Context);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = llvm::Type::getInt64Ty(VMContext); break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = llvm::Type::getDoubleTy(VMContext); break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::getX86_FP80Ty(VMContext); break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext),
+ llvm::Type::getX86_FP80Ty(VMContext),
+ NULL);
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ assert(0 && "Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass: break;
+
+ case Integer:
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getInt64Ty(VMContext), NULL);
+ break;
+ case SSE:
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getDoubleTy(VMContext), NULL);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the upper half of the last used SSE register.
+ //
+  // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+    // If X87Up is preceded by X87, we don't need to do
+    // anything. However, in some cases with unions it may not be
+    // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87)
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getDoubleTy(VMContext), NULL);
+ break;
+ }
+
+ return getCoerceResult(RetTy, ResType, Context);
+}
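+
+// Illustrative examples: 'long double' classifies as X87 and is returned
+// as x86_fp80 in %st0 (Rule 6), while '_Complex long double' classifies as
+// COMPLEX_X87 and is returned as { x86_fp80, x86_fp80 } in %st0 and %st1
+// (Rule 8).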
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &VMContext,
+ unsigned &neededInt,
+ unsigned &neededSSE) const {
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ return getIndirectResult(Ty, Context);
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+ ResType = llvm::Type::getInt64Ty(VMContext);
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE:
+ ++neededSSE;
+ ResType = llvm::Type::getDoubleTy(VMContext);
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+  // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ assert(0 && "Invalid classification for hi word.");
+ break;
+
+ case NoClass: break;
+ case Integer:
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getInt64Ty(VMContext), NULL);
+ ++neededInt;
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ ResType = llvm::StructType::get(VMContext, ResType,
+ llvm::Type::getDoubleTy(VMContext), NULL);
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
+ break;
+ }
+
+ return getCoerceResult(Ty, ResType, Context);
+}
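+
+// Illustrative example: a hypothetical 'struct { long l; double d; }'
+// classifies as Lo = Integer, Hi = SSE, so neededInt == neededSSE == 1 and
+// the argument is coerced to { i64, double }: one eightbyte travels in a
+// GP register, the other in an XMM register.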
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ Context, VMContext);
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, Context, VMContext,
+ neededInt, neededSSE);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type, Context);
+ }
+ }
+}
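+
+// Illustrative example: after five pointer arguments, only one free GP
+// register remains; a sixth argument classified as two INTEGER eightbytes
+// (neededInt == 2) fails the check above, so the whole aggregate is
+// reverted to getIndirectResult and passed on the stack; eightbytes are
+// never split between registers and memory.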
+
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) {
+ llvm::Value *overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 8) {
+ // Note that we follow the ABI & gcc here, even though the type
+ // could in theory have an alignment greater than 16. This case
+ // shouldn't ever matter in practice.
+
+ // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+ llvm::Type::getInt64Ty(CGF.getLLVMContext()));
+ llvm::Value *Mask = llvm::ConstantInt::get(
+ llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
+ overflow_arg_area =
+ CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ overflow_arg_area->getType(),
+ "overflow_arg_area.align");
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
+ (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Res;
+}
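+
+// Illustrative example: for a hypothetical 12-byte struct with 4-byte
+// alignment, the 16-byte realignment above is skipped (alignment <= 8) and
+// overflow_arg_area advances by (12 + 7) & ~7 == 16 bytes, keeping the
+// next slot 8-byte aligned as Step 10 requires.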
+
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::LLVMContext &VMContext = CGF.getLLVMContext();
+ const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
+ const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+ ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
+ neededInt, neededSSE);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+  // register save space.
+
+ llvm::Value *InRegs = 0;
+ llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+ llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs =
+ CGF.Builder.CreateICmpULE(gp_offset,
+ llvm::ConstantInt::get(i32Ty,
+ 48 - neededInt * 8),
+ "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ CGF.Builder.CreateICmpULE(fp_offset,
+ llvm::ConstantInt::get(i32Ty,
+ 176 - neededSSE * 16),
+ "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+ "reg_save_area");
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
+ const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ const llvm::Type *TyLo = ST->getElementType(0);
+ const llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
+ "Unexpected ABI info for mixed regs");
+ const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
+ llvm::Value *V =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ } else if (neededInt) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+ // area, we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi =
+ CGF.Builder.CreateGEP(RegAddrLo,
+ llvm::ConstantInt::get(i32Ty, 16));
+ const llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(DoubleTy);
+ const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+ DoubleTy, NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ }
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
+ "vaarg.addr");
+ ResAddr->reserveOperandSpace(2);
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+
+ return ResAddr;
+}
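+
+// Sketch of the control flow emitted above, in C-like pseudo-code
+// (illustrative only; the names are invented):
+//
+//   if (ap->gp_offset <= 48 - num_gp * 8 &&
+//       ap->fp_offset <= 176 - num_fp * 16) {    // vaarg.in_reg
+//     addr = ap->reg_save_area + offset(s);
+//     ap->gp_offset += num_gp * 8;
+//     ap->fp_offset += num_fp * 16;
+//   } else {                                     // vaarg.in_mem
+//     addr = overflow path (see EmitVAArgFromMemory)
+//   }
+//   result = *(T *) addr;                        // vaarg.end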
+
+// PIC16 ABI Implementation
+
+namespace {
+
+class PIC16ABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  PIC16TargetCodeGenInfo() : TargetCodeGenInfo(new PIC16ABIInfo()) {}
+};
+
+}
+
+ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ return ABIArgInfo::getDirect();
+}
+
+llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+// ARM ABI Implementation
+
+namespace {
+
+class ARMABIInfo : public ABIInfo {
+public:
+ enum ABIKind {
+ APCS = 0,
+ AAPCS = 1,
+ AAPCS_VFP
+ };
+
+private:
+ ABIKind Kind;
+
+public:
+ ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}
+
+private:
+ ABIKind getABIKind() const { return Kind; }
+
+  ABIArgInfo classifyReturnType(QualType RetTy,
+                                ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  ARMTargetCodeGenInfo(ARMABIInfo::ABIKind K)
+    : TargetCodeGenInfo(new ARMABIInfo(K)) {}
+};
+
+}
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ // ARM always overrides the calling convention.
+ switch (getABIKind()) {
+ case APCS:
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
+ break;
+
+ case AAPCS:
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
+ break;
+
+ case AAPCS_VFP:
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
+ break;
+ }
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty))
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+
+ // Ignore empty records.
+ if (isEmptyRecord(Context, Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
+ // FIXME: This doesn't handle alignment > 64 bits.
+ const llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ if (Context.getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::getInt64Ty(VMContext);
+ SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::getInt32Ty(VMContext);
+ SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
+ }
+ std::vector<const llvm::Type*> LLVMFields;
+ LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
+ const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
+ return ABIArgInfo::getCoerce(STy);
+}
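+
+// Illustrative example: a hypothetical 'struct { int a, b, c; }' has
+// 32-bit alignment and 96 bits of data, so SizeRegs == 3 and the argument
+// is coerced to the packed struct { [3 x i32] }, spreading it across three
+// core registers in lieu of byval support.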
+
+static bool isIntegerLikeType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) {
+ // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+ // is called integer-like if its size is less than or equal to one word, and
+ // the offset of each of its addressable sub-fields is zero.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Check that the type fits in a word.
+ if (Size > 32)
+ return false;
+
+ // FIXME: Handle vector types!
+ if (Ty->isVectorType())
+ return false;
+
+ // Float types are never treated as "integer like".
+ if (Ty->isRealFloatingType())
+ return false;
+
+ // If this is a builtin or pointer type then it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+ return true;
+
+ // Complex types "should" be ok by the definition above, but they are not.
+ if (Ty->isAnyComplexType())
+ return false;
+
+ // Single element and zero sized arrays should be allowed, by the definition
+ // above, but they are not.
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Ignore records with flexible arrays.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // Check that all sub-fields are at offset 0, and are themselves "integer
+ // like".
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ bool HadField = false;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const FieldDecl *FD = *i;
+
+ // Check if this field is at offset 0.
+ uint64_t Offset = Layout.getFieldOffset(idx);
+ if (Offset != 0) {
+ // Allow padding bit-fields, but only if they are all at the end of the
+ // structure (despite the wording above, this matches gcc).
+ if (FD->isBitField() &&
+ !FD->getBitWidth()->EvaluateAsInt(Context).getZExtValue()) {
+ for (; i != e; ++i)
+ if (!i->isBitField() ||
+ i->getBitWidth()->EvaluateAsInt(Context).getZExtValue())
+ return false;
+
+ // All remaining fields are padding, allow this.
+ return true;
+ }
+
+ return false;
+ }
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ // Only allow at most one field in a structure. Again this doesn't match the
+ // wording above, but follows gcc.
+ if (!RD->isUnion()) {
+ if (HadField)
+ return false;
+
+ HadField = true;
+ }
+ }
+
+ return true;
+}
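+
+// Illustrative examples: 'struct { short s; }' is integer-like (a single
+// builtin field at offset 0); 'struct { float f; }' is not (real floating
+// point is excluded); and 'struct { short a, b; }' is rejected because 'b'
+// sits at a nonzero offset and is not a padding bit-field.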
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (!CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+
+ // Are we following APCS?
+ if (getABIKind() == APCS) {
+ if (isEmptyRecord(Context, RetTy, false))
+ return ABIArgInfo::getIgnore();
+
+ // Integer like structures are returned in r0.
+ if (isIntegerLikeType(RetTy, Context, VMContext)) {
+ // Return in the smallest viable integer type.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size <= 8)
+ return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
+ if (Size <= 16)
+ return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
+ return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
+ }
+
+ // Otherwise return in memory.
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Otherwise this is an AAPCS variant.
+
+ if (isEmptyRecord(Context, RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size <= 32) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
+ if (Size <= 16)
+ return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
+ return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
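+
+// Illustrative example: a hypothetical 3-byte 'struct { char a, b, c; }'
+// is returned indirectly under APCS (its second and third fields sit at
+// nonzero offsets, so it is not integer-like), but under the AAPCS
+// variants it fits in 32 bits and is coerced to i32, returned in r0.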
+
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+  const llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
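+
+// Illustrative note: each va_arg step above advances 'ap' by the argument
+// size rounded up to 4 bytes, so a hypothetical 'va_arg(ap, char)' still
+// consumes a full 4-byte slot and an 8-byte 'double' consumes two.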
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+// SystemZ ABI Implementation
+
+namespace {
+
+class SystemZABIInfo : public ABIInfo {
+ bool isPromotableIntegerType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ Context, VMContext);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context, VMContext);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  SystemZTargetCodeGenInfo() : TargetCodeGenInfo(new SystemZABIInfo()) {}
+};
+
+}
+
+bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
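+
+// Illustrative examples: with the predicate above, 'short' and 'unsigned
+// int' arguments are given ABIArgInfo::getExtend() and widened to the full
+// 64-bit register, while 'long' is already register width and is passed
+// getDirect().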
+
+llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Implement
+ return 0;
+}
+
+
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &VMContext) const {
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return (isPromotableIntegerType(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+}
+
+// MSP430 ABI Implementation
+
+namespace {
+
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  MSP430TargetCodeGenInfo() : TargetCodeGenInfo(new DefaultABIInfo()) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+      // Step 2: Mark the handler as noinline.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // Step 3: Emit ISR vector alias.
+ unsigned Num = attr->getNumber() + 0xffe0;
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "vector_" +
+ llvm::LowercaseString(llvm::utohexstr(Num)),
+ GV, &M.getModule());
+ }
+ }
+}
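+
+// Illustrative example (hypothetical declaration): a handler written as
+//
+//   void __attribute__((interrupt(2))) isr(void);
+//
+// receives the MSP430_INTR calling convention, is marked noinline, and
+// gains an alias "vector_ffe2" (0xffe0 + 2, printed in lowercase hex)
+// pointing at the function.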
+
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
+ if (TheTargetCodeGenInfo)
+ return *TheTargetCodeGenInfo;
+
+ // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
+ // free it.
+
+ const llvm::Triple &Triple(getContext().Target.getTriple());
+ switch (Triple.getArch()) {
+ default:
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo);
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // FIXME: We want to know the float calling convention as well.
+ if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
+ return *(TheTargetCodeGenInfo =
+ new ARMTargetCodeGenInfo(ARMABIInfo::APCS));
+
+ return *(TheTargetCodeGenInfo =
+ new ARMTargetCodeGenInfo(ARMABIInfo::AAPCS));
+
+ case llvm::Triple::pic16:
+ return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo());
+
+ case llvm::Triple::systemz:
+ return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo());
+
+ case llvm::Triple::msp430:
+ return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo());
+
+ case llvm::Triple::x86:
+ switch (Triple.getOS()) {
+ case llvm::Triple::Darwin:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Context, true, true));
+ case llvm::Triple::Cygwin:
+ case llvm::Triple::MinGW32:
+ case llvm::Triple::MinGW64:
+ case llvm::Triple::AuroraUX:
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::OpenBSD:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Context, false, true));
+
+ default:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(Context, false, false));
+ }
+
+ case llvm::Triple::x86_64:
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo());
+ }
+}
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
new file mode 100644
index 0000000..495b22f
--- /dev/null
+++ b/lib/CodeGen/TargetInfo.h
@@ -0,0 +1,50 @@
+//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_TARGETINFO_H
+#define CLANG_CODEGEN_TARGETINFO_H
+
+namespace llvm {
+ class GlobalValue;
+}
+
+namespace clang {
+ class ABIInfo;
+ class Decl;
+
+ namespace CodeGen {
+ class CodeGenModule;
+ }
+
+ /// TargetCodeGenInfo - This class organizes various target-specific
+  /// code generation issues, like target-specific attributes, builtins and so
+ /// on.
+ class TargetCodeGenInfo {
+ ABIInfo *Info;
+ public:
+    // WARNING: Takes ownership of the ABIInfo.
+    TargetCodeGenInfo(ABIInfo *info = 0) : Info(info) {}
+ virtual ~TargetCodeGenInfo();
+
+ /// getABIInfo() - Returns ABI info helper for the target.
+ const ABIInfo& getABIInfo() const { return *Info; }
+
+ /// SetTargetAttributes - Provides a convenient hook to handle extra
+ /// target-specific attributes for the given global.
+    virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                                     CodeGen::CodeGenModule &M) const {}
+ };
+}
+
+#endif // CLANG_CODEGEN_TARGETINFO_H