Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 24
-rw-r--r--  lib/CodeGen/CGBlocks.h | 3
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 91
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 12
-rw-r--r--  lib/CodeGen/CGCall.cpp | 11
-rw-r--r--  lib/CodeGen/CGClass.cpp | 664
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 209
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 10
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 62
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 11
-rw-r--r--  lib/CodeGen/CGException.cpp | 235
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 335
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 25
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 50
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 579
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 133
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 216
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 230
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 341
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 5
-rw-r--r--  lib/CodeGen/CGRTTI.cpp | 101
-rw-r--r--  lib/CodeGen/CGRecordLayout.h | 134
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp | 306
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 64
-rw-r--r--  lib/CodeGen/CGTemporaries.cpp | 12
-rw-r--r--  lib/CodeGen/CGVTT.cpp | 24
-rw-r--r--  lib/CodeGen/CGVTables.cpp (renamed from lib/CodeGen/CGVtable.cpp) | 451
-rw-r--r--  lib/CodeGen/CGVTables.h (renamed from lib/CodeGen/CGVtable.h) | 39
-rw-r--r--  lib/CodeGen/CGValue.h | 67
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 2
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 24
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 93
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 239
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 37
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 5
-rw-r--r--  lib/CodeGen/Mangle.cpp | 239
-rw-r--r--  lib/CodeGen/Mangle.h | 12
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 83
38 files changed, 3089 insertions, 2089 deletions
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 5097341..db24def 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -706,7 +706,7 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
BlockDeclRefDecls);
// FIXME: This leaks
ImplicitParamDecl *SelfDecl =
- ImplicitParamDecl::Create(getContext(), 0,
+ ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD),
SourceLocation(), II,
ParmTy);
@@ -812,7 +812,8 @@ CharUnits BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
Pad.getQuantity()),
ArrayType::Normal, 0);
ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
- 0, QualType(PadTy), 0, VarDecl::None);
+ 0, QualType(PadTy), 0,
+ VarDecl::None, VarDecl::None);
Expr *E;
E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
SourceLocation());
@@ -860,7 +861,9 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
- FunctionDecl::Static, false,
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false,
true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -941,8 +944,9 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
- FunctionDecl::Static, false,
- true);
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
if (NoteForHelperp) {
@@ -1025,8 +1029,9 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
- FunctionDecl::Static, false,
- true);
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
// dst->x
@@ -1089,8 +1094,9 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
- FunctionDecl::Static, false,
- true);
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index efee0e3..5646d00 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -38,6 +38,7 @@ namespace llvm {
class GlobalValue;
class TargetData;
class FunctionType;
+ class PointerType;
class Value;
class LLVMContext;
}
@@ -127,7 +128,7 @@ protected:
llvm::LLVMContext &VMContext;
public:
- const llvm::Type *PtrToInt8Ty;
+ const llvm::PointerType *PtrToInt8Ty;
struct HelperInfo {
int index;
int flag;
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 38c40ed..95c41db 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -682,13 +682,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
case Builtin::BIsqrtl: {
- // Rewrite sqrt to intrinsic if allowed.
- if (!FD->hasAttr<ConstAttr>())
- break;
- Value *Arg0 = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = Arg0->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
- return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
+ // TODO: there is currently no set of optimizer flags
+ // sufficient for us to rewrite sqrt to @llvm.sqrt.
+ // -fmath-errno=0 is not good enough; we need finiteness.
+ // We could probably precondition the call with an ult
+ // against 0, but is that worth the complexity?
+ break;
}
case Builtin::BIpow:
@@ -983,8 +982,38 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateStore(Ops[1], Ops[0]);
}
case X86::BI__builtin_ia32_palignr: {
- Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 9 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 8) {
+ const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+ llvm::SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 8 but less
+ // than 16 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+ const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // Shift the vector right by (shiftVal - 8) bytes, expressed in bits.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ }
+
+  // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_palignr128: {
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
@@ -1025,5 +1054,49 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return 0;
+
+ // vec_st
+ case PPC::BI__builtin_altivec_stvx:
+ case PPC::BI__builtin_altivec_stvxl:
+ case PPC::BI__builtin_altivec_stvebx:
+ case PPC::BI__builtin_altivec_stvehx:
+ case PPC::BI__builtin_altivec_stvewx:
+ {
+ Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext));
+ Ops[1] = !isa<Constant>(Ops[1]) || !cast<Constant>(Ops[1])->isNullValue()
+ ? Builder.CreateGEP(Ops[2], Ops[1], "tmp") : Ops[2];
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+    default: assert(0 && "Unsupported st intrinsic!");
+ case PPC::BI__builtin_altivec_stvx:
+ ID = Intrinsic::ppc_altivec_stvx;
+ break;
+ case PPC::BI__builtin_altivec_stvxl:
+ ID = Intrinsic::ppc_altivec_stvxl;
+ break;
+ case PPC::BI__builtin_altivec_stvebx:
+ ID = Intrinsic::ppc_altivec_stvebx;
+ break;
+ case PPC::BI__builtin_altivec_stvehx:
+ ID = Intrinsic::ppc_altivec_stvehx;
+ break;
+ case PPC::BI__builtin_altivec_stvewx:
+ ID = Intrinsic::ppc_altivec_stvewx;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ }
+ }
return 0;
}
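
The three-way palignr lowering above (a shuffle for shifts of at most 8 bytes, a 64-bit logical right shift for 9 through 15, and a zero constant from 16 on) is easiest to sanity-check against a scalar reference. The sketch below models those semantics with plain byte arrays standing in for the MMX operands; the helper name palignr64 is invented for illustration and is not part of the patch.

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Reference semantics for the 64-bit palignr: concatenate {hi, lo},
    // shift the 16-byte pair right by `shift` bytes, keep the low 8 bytes.
    // result[i] corresponds to shuffle index shift + i into (lo, hi).
    static std::array<uint8_t, 8> palignr64(const std::array<uint8_t, 8> &hi,
                                            const std::array<uint8_t, 8> &lo,
                                            unsigned shift) {
      std::array<uint8_t, 8> result = {};
      if (shift >= 16)
        return result;                 // Everything shifted out: all zeros.
      for (unsigned i = 0; i != 8; ++i) {
        unsigned idx = shift + i;      // Index into the 16-byte concatenation.
        if (idx < 8)
          result[i] = lo[idx];         // Taken from the second operand...
        else if (idx < 16)
          result[i] = hi[idx - 8];     // ...or from the first, once past it.
        // idx >= 16: this byte was shifted out and stays zero.
      }
      return result;
    }

    int main() {
      std::array<uint8_t, 8> hi = {8, 9, 10, 11, 12, 13, 14, 15};
      std::array<uint8_t, 8> lo = {0, 1, 2, 3, 4, 5, 6, 7};
      for (uint8_t b : palignr64(hi, lo, 3))
        std::printf("%d ", b);         // Prints: 3 4 5 6 7 8 9 10
      std::printf("\n");
      return 0;
    }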
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 93a182f..74cf113 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -297,15 +297,15 @@ void CodeGenModule::getMangledCXXDtorName(MangleBuffer &Name,
getMangleContext().mangleCXXDtor(D, Type, Name.getBuffer());
}
-static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VtableIndex,
+static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
llvm::Value *This, const llvm::Type *Ty) {
Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo();
- llvm::Value *Vtable = CGF.Builder.CreateBitCast(This, Ty);
- Vtable = CGF.Builder.CreateLoad(Vtable);
+ llvm::Value *VTable = CGF.Builder.CreateBitCast(This, Ty);
+ VTable = CGF.Builder.CreateLoad(VTable);
llvm::Value *VFuncPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(Vtable, VtableIndex, "vfn");
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
return CGF.Builder.CreateLoad(VFuncPtr);
}
@@ -313,7 +313,7 @@ llvm::Value *
CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
const llvm::Type *Ty) {
MD = MD->getCanonicalDecl();
- uint64_t VTableIndex = CGM.getVTables().getMethodVtableIndex(MD);
+ uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
@@ -323,7 +323,7 @@ CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
llvm::Value *&This, const llvm::Type *Ty) {
DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
uint64_t VTableIndex =
- CGM.getVTables().getMethodVtableIndex(GlobalDecl(DD, Type));
+ CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index cb1ecc1..92d15d9 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -528,7 +528,7 @@ static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
}
const llvm::Type *
-CodeGenTypes::GetFunctionTypeForVtable(const CXXMethodDecl *MD) {
+CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
@@ -586,8 +586,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
PAL.push_back(llvm::AttributeWithIndex::get(Index,
- llvm::Attribute::StructRet |
- llvm::Attribute::NoAlias));
+ llvm::Attribute::StructRet));
++Index;
// sret disables readnone and readonly
FuncAttrs &= ~(llvm::Attribute::ReadOnly |
@@ -870,7 +869,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
- const Decl *TargetDecl) {
+ const Decl *TargetDecl,
+ llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
llvm::SmallVector<llvm::Value*, 16> Args;
@@ -996,6 +996,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Args.data(), Args.data()+Args.size());
EmitBlock(Cont);
}
+ if (callOrInvoke) {
+ *callOrInvoke = CS.getInstruction();
+ }
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
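
The new callOrInvoke parameter on EmitCall is an optional out-parameter: a caller that passes a non-null pointer receives the call or invoke instruction the function emitted, while callers that do not need it pass null and see no change. A minimal model of the pattern follows; the types and names are stand-ins for illustration, not clang's actual classes.

    #include <cstdio>

    // Minimal model of an optional out-parameter: the emitter reports the
    // instruction it created through a pointer-to-pointer, but only when
    // the caller asked for it.
    struct Instruction { const char *Name; };

    static int emitCall(Instruction **callOrInvoke) {
      static Instruction Call = {"call"};
      if (callOrInvoke)
        *callOrInvoke = &Call;   // Report the emitted instruction, if asked.
      return 42;                 // Stand-in for the call's result RValue.
    }

    int main() {
      Instruction *Inst = 0;
      int RV = emitCall(&Inst);  // Interested caller observes the instruction.
      std::printf("%s -> %d\n", Inst->Name, RV);
      emitCall(0);               // Uninterested caller passes null, as before.
      return 0;
    }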
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 177e862..a604eef 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -20,65 +20,61 @@ using namespace clang;
using namespace CodeGen;
static uint64_t
-ComputeNonVirtualBaseClassOffset(ASTContext &Context,
- const CXXBasePath &Path,
- unsigned Start) {
+ComputeNonVirtualBaseClassOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedClass,
+ CXXBaseSpecifierArray::iterator Start,
+ CXXBaseSpecifierArray::iterator End) {
uint64_t Offset = 0;
-
- for (unsigned i = Start, e = Path.size(); i != e; ++i) {
- const CXXBasePathElement& Element = Path[i];
+
+ const CXXRecordDecl *RD = DerivedClass;
+
+ for (CXXBaseSpecifierArray::iterator I = Start; I != End; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ assert(!Base->isVirtual() && "Should not see virtual bases here!");
// Get the layout.
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const CXXBaseSpecifier *BS = Element.Base;
- assert(!BS->isVirtual() && "Should not see virtual bases here!");
-
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(BS->getType()->getAs<RecordType>()->getDecl());
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
// Add the offset.
- Offset += Layout.getBaseClassOffset(Base) / 8;
+ Offset += Layout.getBaseClassOffset(BaseDecl);
+
+ RD = BaseDecl;
}
-
- return Offset;
+
+ // FIXME: We should not use / 8 here.
+ return Offset / 8;
}
llvm::Constant *
-CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *Class,
- const CXXRecordDecl *BaseClass) {
- if (Class == BaseClass)
- return 0;
-
- CXXBasePaths Paths(/*FindAmbiguities=*/false,
- /*RecordPaths=*/true, /*DetectVirtual=*/false);
- if (!const_cast<CXXRecordDecl *>(Class)->
- isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClass), Paths)) {
- assert(false && "Class must be derived from the passed in base class!");
- return 0;
- }
+CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ const CXXBaseSpecifierArray &BasePath) {
+ assert(!BasePath.empty() && "Base path should not be empty!");
- uint64_t Offset = ComputeNonVirtualBaseClassOffset(getContext(),
- Paths.front(), 0);
+ uint64_t Offset =
+ ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
+ BasePath.begin(), BasePath.end());
if (!Offset)
return 0;
-
+
const llvm::Type *PtrDiffTy =
- Types.ConvertType(getContext().getPointerDiffType());
-
+ Types.ConvertType(getContext().getPointerDiffType());
+
return llvm::ConstantInt::get(PtrDiffTy, Offset);
}
-/// Gets the address of a virtual base class within a complete object.
+/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
-CodeGenFunction::GetAddressOfBaseOfCompleteClass(llvm::Value *This,
- bool isBaseVirtual,
- const CXXRecordDecl *Derived,
- const CXXRecordDecl *Base) {
+CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual) {
// 'this' must be a pointer (in some address space) to Derived.
assert(This->getType()->isPointerTy() &&
cast<llvm::PointerType>(This->getType())->getElementType()
@@ -87,7 +83,7 @@ CodeGenFunction::GetAddressOfBaseOfCompleteClass(llvm::Value *This,
// Compute the offset of the virtual base.
uint64_t Offset;
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
- if (isBaseVirtual)
+ if (BaseIsVirtual)
Offset = Layout.getVBaseClassOffset(Base);
else
Offset = Layout.getBaseClassOffset(Base);
@@ -105,51 +101,63 @@ CodeGenFunction::GetAddressOfBaseOfCompleteClass(llvm::Value *This,
return V;
}
-llvm::Value *
-CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
- const CXXRecordDecl *Class,
- const CXXRecordDecl *BaseClass,
- bool NullCheckValue) {
- QualType BTy =
- getContext().getCanonicalType(
- getContext().getTypeDeclType(BaseClass));
- const llvm::Type *BasePtrTy = llvm::PointerType::getUnqual(ConvertType(BTy));
-
- if (Class == BaseClass) {
- // Just cast back.
- return Builder.CreateBitCast(Value, BasePtrTy);
- }
+static llvm::Value *
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
+ uint64_t NonVirtual, llvm::Value *Virtual) {
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Value *NonVirtualOffset = 0;
+ if (NonVirtual)
+ NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy, NonVirtual);
+
+ llvm::Value *BaseOffset;
+ if (Virtual) {
+ if (NonVirtualOffset)
+ BaseOffset = CGF.Builder.CreateAdd(Virtual, NonVirtualOffset);
+ else
+ BaseOffset = Virtual;
+ } else
+ BaseOffset = NonVirtualOffset;
+
+ // Apply the base offset.
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
+ ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
- CXXBasePaths Paths(/*FindAmbiguities=*/false,
- /*RecordPaths=*/true, /*DetectVirtual=*/false);
- if (!const_cast<CXXRecordDecl *>(Class)->
- isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClass), Paths)) {
- assert(false && "Class must be derived from the passed in base class!");
- return 0;
- }
+ return ThisPtr;
+}
- unsigned Start = 0;
- llvm::Value *VirtualOffset = 0;
+llvm::Value *
+CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXBaseSpecifierArray &BasePath,
+ bool NullCheckValue) {
+ assert(!BasePath.empty() && "Base path should not be empty!");
- const CXXBasePath &Path = Paths.front();
+ CXXBaseSpecifierArray::iterator Start = BasePath.begin();
const CXXRecordDecl *VBase = 0;
- for (unsigned i = 0, e = Path.size(); i != e; ++i) {
- const CXXBasePathElement& Element = Path[i];
- if (Element.Base->isVirtual()) {
- Start = i+1;
- QualType VBaseType = Element.Base->getType();
- VBase = cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
- }
+
+ // Get the virtual base.
+ if ((*Start)->isVirtual()) {
+ VBase =
+ cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
+ ++Start;
}
+
+ uint64_t NonVirtualOffset =
+ ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
+ Start, BasePath.end());
- uint64_t Offset =
- ComputeNonVirtualBaseClassOffset(getContext(), Paths.front(), Start);
+ // Get the base pointer type.
+ const llvm::Type *BasePtrTy =
+ ConvertType((BasePath.end()[-1])->getType())->getPointerTo();
- if (!Offset && !VBase) {
+ if (!NonVirtualOffset && !VBase) {
// Just cast back.
return Builder.CreateBitCast(Value, BasePtrTy);
}
-
+
llvm::BasicBlock *CastNull = 0;
llvm::BasicBlock *CastNotNull = 0;
llvm::BasicBlock *CastEnd = 0;
@@ -165,28 +173,15 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
-
+
+ llvm::Value *VirtualOffset = 0;
+
if (VBase)
- VirtualOffset = GetVirtualBaseClassOffset(Value, Class, VBase);
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
- const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
- llvm::Value *NonVirtualOffset = 0;
- if (Offset)
- NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy, Offset);
-
- llvm::Value *BaseOffset;
- if (VBase) {
- if (NonVirtualOffset)
- BaseOffset = Builder.CreateAdd(VirtualOffset, NonVirtualOffset);
- else
- BaseOffset = VirtualOffset;
- } else
- BaseOffset = NonVirtualOffset;
-
- // Apply the base offset.
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
- Value = Builder.CreateBitCast(Value, Int8PtrTy);
- Value = Builder.CreateGEP(Value, BaseOffset, "add.ptr");
+ // Apply the offsets.
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset,
+ VirtualOffset);
// Cast back.
Value = Builder.CreateBitCast(Value, BasePtrTy);
@@ -210,21 +205,17 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
- const CXXRecordDecl *Class,
- const CXXRecordDecl *DerivedClass,
+ const CXXRecordDecl *Derived,
+ const CXXBaseSpecifierArray &BasePath,
bool NullCheckValue) {
+ assert(!BasePath.empty() && "Base path should not be empty!");
+
QualType DerivedTy =
- getContext().getCanonicalType(
- getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(DerivedClass)));
+ getContext().getCanonicalType(getContext().getTagDeclType(Derived));
const llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
- if (Class == DerivedClass) {
- // Just cast back.
- return Builder.CreateBitCast(Value, DerivedPtrTy);
- }
-
llvm::Value *NonVirtualOffset =
- CGM.GetNonVirtualBaseClassOffset(DerivedClass, Class);
+ CGM.GetNonVirtualBaseClassOffset(Derived, BasePath);
if (!NonVirtualOffset) {
// No offset, we can just cast back.
@@ -274,23 +265,16 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
/// EmitCopyCtorCall - Emit a call to a copy constructor.
static void
-EmitCopyCtorCall(CodeGenFunction &CGF,
- const CXXConstructorDecl *CopyCtor, CXXCtorType CopyCtorType,
- llvm::Value *ThisPtr, llvm::Value *VTT, llvm::Value *Src) {
- llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor, CopyCtorType);
+EmitCopyCtorCall(CodeGenFunction &CGF, const CXXConstructorDecl *CopyCtor,
+ llvm::Value *ThisPtr, llvm::Value *Src) {
+ llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor, Ctor_Complete);
CallArgList CallArgs;
// Push the this ptr.
CallArgs.push_back(std::make_pair(RValue::get(ThisPtr),
CopyCtor->getThisType(CGF.getContext())));
-
- // Push the VTT parameter if necessary.
- if (VTT) {
- QualType T = CGF.getContext().getPointerType(CGF.getContext().VoidPtrTy);
- CallArgs.push_back(std::make_pair(RValue::get(VTT), T));
- }
-
+
// Push the Src ptr.
CallArgs.push_back(std::make_pair(RValue::get(Src),
CopyCtor->getParamDecl(0)->getType()));
@@ -325,13 +309,8 @@ EmitCopyCtorCall(CodeGenFunction &CGF,
// FIXME. Consolidate this with EmitCXXAggrConstructorCall.
void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
llvm::Value *Src,
- const ArrayType *Array,
- const CXXRecordDecl *BaseClassDecl,
- QualType Ty) {
- const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
- assert(CA && "VLA cannot be copied over");
- bool BitwiseCopy = BaseClassDecl->hasTrivialCopyConstructor();
-
+ const ConstantArrayType *Array,
+ const CXXRecordDecl *ClassDecl) {
// Create a temporary for the loop index and initialize it with 0.
llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
"loop.index");
@@ -347,7 +326,7 @@ void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
// Generate: if (loop-index < number-of-elements) fall to the loop body,
// otherwise, go to the block after the for-loop.
- uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
+ uint64_t NumElements = getContext().getConstantArrayElementCount(Array);
llvm::Value * NumElementsPtr =
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
@@ -362,95 +341,8 @@ void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
Counter = Builder.CreateLoad(IndexPtr);
Src = Builder.CreateInBoundsGEP(Src, Counter, "srcaddress");
Dest = Builder.CreateInBoundsGEP(Dest, Counter, "destaddress");
- if (BitwiseCopy)
- EmitAggregateCopy(Dest, Src, Ty);
- else if (CXXConstructorDecl *BaseCopyCtor =
- BaseClassDecl->getCopyConstructor(getContext(), 0))
- EmitCopyCtorCall(*this, BaseCopyCtor, Ctor_Complete, Dest, 0, Src);
-
- EmitBlock(ContinueBlock);
-
- // Emit the increment of the loop counter.
- llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
- Counter = Builder.CreateLoad(IndexPtr);
- NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
- Builder.CreateStore(NextVal, IndexPtr);
-
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
-}
-
-/// EmitClassAggrCopyAssignment - This routine generates code to assign a class
-/// array of objects from SrcValue to DestValue. Assignment can be either a
-/// bitwise assignment or via a copy assignment operator function call.
-/// FIXME. This can be consolidated with EmitClassAggrMemberwiseCopy
-void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
- llvm::Value *Src,
- const ArrayType *Array,
- const CXXRecordDecl *BaseClassDecl,
- QualType Ty) {
- const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
- assert(CA && "VLA cannot be asssigned");
- bool BitwiseAssign = BaseClassDecl->hasTrivialCopyAssignment();
-
- // Create a temporary for the loop index and initialize it with 0.
- llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
- "loop.index");
- llvm::Value* zeroConstant =
- llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
- Builder.CreateStore(zeroConstant, IndexPtr);
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
- // Generate: if (loop-index < number-of-elements fall to the loop body,
- // otherwise, go to the block after the for-loop.
- uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
- llvm::Value * NumElementsPtr =
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
- "isless");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
- EmitBlock(ForBody);
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
- // Inside the loop body, emit the assignment operator call on array element.
- Counter = Builder.CreateLoad(IndexPtr);
- Src = Builder.CreateInBoundsGEP(Src, Counter, "srcaddress");
- Dest = Builder.CreateInBoundsGEP(Dest, Counter, "destaddress");
- const CXXMethodDecl *MD = 0;
- if (BitwiseAssign)
- EmitAggregateCopy(Dest, Src, Ty);
- else {
- BaseClassDecl->hasConstCopyAssignment(getContext(), MD);
- assert(MD && "EmitClassAggrCopyAssignment - No user assign");
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *LTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, LTy);
-
- CallArgList CallArgs;
- // Push the this (Dest) ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Dest),
- MD->getThisType(getContext())));
-
- // Push the Src ptr.
- QualType SrcTy = MD->getParamDecl(0)->getType();
- RValue SrcValue = SrcTy->isReferenceType() ? RValue::get(Src) :
- RValue::getAggregate(Src);
- CallArgs.push_back(std::make_pair(SrcValue, SrcTy));
- EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, MD);
- }
+ EmitClassMemberwiseCopy(Dest, Src, ClassDecl);
+
EmitBlock(ContinueBlock);
// Emit the increment of the loop counter.
@@ -468,7 +360,8 @@ void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
/// GetVTTParameter - Return the VTT parameter that should be passed to a
/// base constructor/destructor with virtual bases.
-static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) {
+static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
+ bool ForVirtualBase) {
if (!CodeGenVTables::needsVTTParameter(GD)) {
// This constructor/destructor does not need a VTT parameter.
return 0;
@@ -486,9 +379,16 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) {
if (RD == Base) {
assert(!CodeGenVTables::needsVTTParameter(CGF.CurGD) &&
"doing no-op VTT offset in base dtor/ctor?");
+ assert(!ForVirtualBase && "Can't have same class as virtual base!");
SubVTTIndex = 0;
} else {
- SubVTTIndex = CGF.CGM.getVTables().getSubVTTIndex(RD, Base);
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(RD);
+ uint64_t BaseOffset = ForVirtualBase ?
+ Layout.getVBaseClassOffset(Base) : Layout.getBaseClassOffset(Base);
+
+ SubVTTIndex =
+ CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
}
@@ -511,75 +411,16 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) {
/// or via a copy constructor call.
void CodeGenFunction::EmitClassMemberwiseCopy(
llvm::Value *Dest, llvm::Value *Src,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl, QualType Ty) {
- CXXCtorType CtorType = Ctor_Complete;
-
- if (ClassDecl) {
- Dest = GetAddressOfBaseClass(Dest, ClassDecl, BaseClassDecl,
- /*NullCheckValue=*/false);
- Src = GetAddressOfBaseClass(Src, ClassDecl, BaseClassDecl,
- /*NullCheckValue=*/false);
-
- // We want to call the base constructor.
- CtorType = Ctor_Base;
- }
- if (BaseClassDecl->hasTrivialCopyConstructor()) {
- EmitAggregateCopy(Dest, Src, Ty);
+ const CXXRecordDecl *ClassDecl) {
+ if (ClassDecl->hasTrivialCopyConstructor()) {
+ EmitAggregateCopy(Dest, Src, getContext().getTagDeclType(ClassDecl));
return;
}
- CXXConstructorDecl *BaseCopyCtor =
- BaseClassDecl->getCopyConstructor(getContext(), 0);
- if (!BaseCopyCtor)
- return;
+ CXXConstructorDecl *CopyCtor = ClassDecl->getCopyConstructor(getContext(), 0);
+ assert(CopyCtor && "Did not have copy ctor!");
- llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(BaseCopyCtor, CtorType));
- EmitCopyCtorCall(*this, BaseCopyCtor, CtorType, Dest, VTT, Src);
-}
-
-/// EmitClassCopyAssignment - This routine generates code to copy assign a class
-/// object from SrcValue to DestValue. Assignment can be either a bitwise
-/// assignment of via an assignment operator call.
-// FIXME. Consolidate this with EmitClassMemberwiseCopy as they share a lot.
-void CodeGenFunction::EmitClassCopyAssignment(
- llvm::Value *Dest, llvm::Value *Src,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl,
- QualType Ty) {
- if (ClassDecl) {
- Dest = GetAddressOfBaseClass(Dest, ClassDecl, BaseClassDecl,
- /*NullCheckValue=*/false);
- Src = GetAddressOfBaseClass(Src, ClassDecl, BaseClassDecl,
- /*NullCheckValue=*/false);
- }
- if (BaseClassDecl->hasTrivialCopyAssignment()) {
- EmitAggregateCopy(Dest, Src, Ty);
- return;
- }
-
- const CXXMethodDecl *MD = 0;
- BaseClassDecl->hasConstCopyAssignment(getContext(), MD);
- assert(MD && "EmitClassCopyAssignment - missing copy assign");
-
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *LTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, LTy);
-
- CallArgList CallArgs;
- // Push the this (Dest) ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Dest),
- MD->getThisType(getContext())));
-
- // Push the Src ptr.
- QualType SrcTy = MD->getParamDecl(0)->getType();
- RValue SrcValue = SrcTy->isReferenceType() ? RValue::get(Src) :
- RValue::getAggregate(Src);
- CallArgs.push_back(std::make_pair(SrcValue, SrcTy));
- EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, MD);
+ EmitCopyCtorCall(*this, CopyCtor, Dest, Src);
}
/// SynthesizeCXXCopyConstructor - This routine implicitly defines body of a
@@ -600,30 +441,25 @@ void CodeGenFunction::EmitClassCopyAssignment(
void
CodeGenFunction::SynthesizeCXXCopyConstructor(const FunctionArgList &Args) {
const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
+ CXXCtorType CtorType = CurGD.getCtorType();
+ (void) CtorType;
+
const CXXRecordDecl *ClassDecl = Ctor->getParent();
assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
"SynthesizeCXXCopyConstructor - copy constructor has definition already");
assert(!Ctor->isTrivial() && "shouldn't need to generate trivial ctor");
- FunctionArgList::const_iterator i = Args.begin();
- const VarDecl *ThisArg = i->first;
- llvm::Value *ThisObj = GetAddrOfLocalVar(ThisArg);
- llvm::Value *LoadOfThis = Builder.CreateLoad(ThisObj, "this");
- const VarDecl *SrcArg = (i+1)->first;
- llvm::Value *SrcObj = GetAddrOfLocalVar(SrcArg);
- llvm::Value *LoadOfSrc = Builder.CreateLoad(SrcObj);
-
- for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
- Base != ClassDecl->bases_end(); ++Base) {
- // FIXME. copy constrution of virtual base NYI
- if (Base->isVirtual())
- continue;
+ llvm::Value *ThisPtr = LoadCXXThis();
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- EmitClassMemberwiseCopy(LoadOfThis, LoadOfSrc, ClassDecl, BaseClassDecl,
- Base->getType());
- }
+ // Find the source pointer.
+ unsigned SrcArgIndex = Args.size() - 1;
+ assert(CtorType == Ctor_Base || SrcArgIndex == 1);
+ assert(CtorType != Ctor_Base ||
+ (ClassDecl->getNumVBases() != 0 && SrcArgIndex == 2) ||
+ SrcArgIndex == 1);
+
+ llvm::Value *SrcPtr =
+ Builder.CreateLoad(GetAddrOfLocalVar(Args[SrcArgIndex].first));
for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
E = ClassDecl->field_end(); I != E; ++I) {
@@ -638,27 +474,26 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const FunctionArgList &Args) {
if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
CXXRecordDecl *FieldClassDecl
= cast<CXXRecordDecl>(FieldClassType->getDecl());
- LValue LHS = EmitLValueForField(LoadOfThis, Field, 0);
- LValue RHS = EmitLValueForField(LoadOfSrc, Field, 0);
+ LValue LHS = EmitLValueForField(ThisPtr, Field, 0);
+ LValue RHS = EmitLValueForField(SrcPtr, Field, 0);
if (Array) {
- const llvm::Type *BasePtr = ConvertType(FieldType);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ const llvm::Type *BasePtr = ConvertType(FieldType)->getPointerTo();
llvm::Value *DestBaseAddrPtr =
Builder.CreateBitCast(LHS.getAddress(), BasePtr);
llvm::Value *SrcBaseAddrPtr =
Builder.CreateBitCast(RHS.getAddress(), BasePtr);
EmitClassAggrMemberwiseCopy(DestBaseAddrPtr, SrcBaseAddrPtr, Array,
- FieldClassDecl, FieldType);
+ FieldClassDecl);
}
else
EmitClassMemberwiseCopy(LHS.getAddress(), RHS.getAddress(),
- 0 /*ClassDecl*/, FieldClassDecl, FieldType);
+ FieldClassDecl);
continue;
}
// Do a built-in assignment of scalar data members.
- LValue LHS = EmitLValueForFieldInitialization(LoadOfThis, Field, 0);
- LValue RHS = EmitLValueForFieldInitialization(LoadOfSrc, Field, 0);
+ LValue LHS = EmitLValueForFieldInitialization(ThisPtr, Field, 0);
+ LValue RHS = EmitLValueForFieldInitialization(SrcPtr, Field, 0);
if (!hasAggregateLLVMType(Field->getType())) {
RValue RVRHS = EmitLoadOfLValue(RHS, Field->getType());
@@ -675,100 +510,6 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const FunctionArgList &Args) {
InitializeVTablePointers(ClassDecl);
}
-/// SynthesizeCXXCopyAssignment - Implicitly define copy assignment operator.
-/// Before the implicitly-declared copy assignment operator for a class is
-/// implicitly defined, all implicitly- declared copy assignment operators for
-/// its direct base classes and its nonstatic data members shall have been
-/// implicitly defined. [12.8-p12]
-/// The implicitly-defined copy assignment operator for class X performs
-/// memberwise assignment of its subob- jects. The direct base classes of X are
-/// assigned first, in the order of their declaration in
-/// the base-specifier-list, and then the immediate nonstatic data members of X
-/// are assigned, in the order in which they were declared in the class
-/// definition.Each subobject is assigned in the manner appropriate to its type:
-/// if the subobject is of class type, the copy assignment operator for the
-/// class is used (as if by explicit qualification; that is, ignoring any
-/// possible virtual overriding functions in more derived classes);
-///
-/// if the subobject is an array, each element is assigned, in the manner
-/// appropriate to the element type;
-///
-/// if the subobject is of scalar type, the built-in assignment operator is
-/// used.
-void CodeGenFunction::SynthesizeCXXCopyAssignment(const FunctionArgList &Args) {
- const CXXMethodDecl *CD = cast<CXXMethodDecl>(CurGD.getDecl());
- const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
- assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
- "SynthesizeCXXCopyAssignment - copy assignment has user declaration");
-
- FunctionArgList::const_iterator i = Args.begin();
- const VarDecl *ThisArg = i->first;
- llvm::Value *ThisObj = GetAddrOfLocalVar(ThisArg);
- llvm::Value *LoadOfThis = Builder.CreateLoad(ThisObj, "this");
- const VarDecl *SrcArg = (i+1)->first;
- llvm::Value *SrcObj = GetAddrOfLocalVar(SrcArg);
- llvm::Value *LoadOfSrc = Builder.CreateLoad(SrcObj);
-
- for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
- Base != ClassDecl->bases_end(); ++Base) {
- // FIXME. copy assignment of virtual base NYI
- if (Base->isVirtual())
- continue;
-
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- EmitClassCopyAssignment(LoadOfThis, LoadOfSrc, ClassDecl, BaseClassDecl,
- Base->getType());
- }
-
- for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
- FieldEnd = ClassDecl->field_end();
- Field != FieldEnd; ++Field) {
- QualType FieldType = getContext().getCanonicalType((*Field)->getType());
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(FieldType);
- if (Array)
- FieldType = getContext().getBaseElementType(FieldType);
-
- if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
- CXXRecordDecl *FieldClassDecl
- = cast<CXXRecordDecl>(FieldClassType->getDecl());
- LValue LHS = EmitLValueForField(LoadOfThis, *Field, 0);
- LValue RHS = EmitLValueForField(LoadOfSrc, *Field, 0);
- if (Array) {
- const llvm::Type *BasePtr = ConvertType(FieldType);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *DestBaseAddrPtr =
- Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- llvm::Value *SrcBaseAddrPtr =
- Builder.CreateBitCast(RHS.getAddress(), BasePtr);
- EmitClassAggrCopyAssignment(DestBaseAddrPtr, SrcBaseAddrPtr, Array,
- FieldClassDecl, FieldType);
- }
- else
- EmitClassCopyAssignment(LHS.getAddress(), RHS.getAddress(),
- 0 /*ClassDecl*/, FieldClassDecl, FieldType);
- continue;
- }
- // Do a built-in assignment of scalar data members.
- LValue LHS = EmitLValueForField(LoadOfThis, *Field, 0);
- LValue RHS = EmitLValueForField(LoadOfSrc, *Field, 0);
- if (!hasAggregateLLVMType(Field->getType())) {
- RValue RVRHS = EmitLoadOfLValue(RHS, Field->getType());
- EmitStoreThroughLValue(RVRHS, LHS, Field->getType());
- } else if (Field->getType()->isAnyComplexType()) {
- ComplexPairTy Pair = LoadComplexFromAddr(RHS.getAddress(),
- RHS.isVolatileQualified());
- StoreComplexToAddr(Pair, LHS.getAddress(), LHS.isVolatileQualified());
- } else {
- EmitAggregateCopy(LHS.getAddress(), RHS.getAddress(), Field->getType());
- }
- }
-
- // return *this;
- Builder.CreateStore(LoadOfThis, ReturnValue);
-}
-
static void EmitBaseInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
CXXBaseOrMemberInitializer *BaseInit,
@@ -782,15 +523,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
CXXRecordDecl *BaseClassDecl =
cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
- // FIXME: This method of determining whether a base is virtual is ridiculous;
- // it should be part of BaseInit.
- bool isBaseVirtual = false;
- for (CXXRecordDecl::base_class_const_iterator I = ClassDecl->vbases_begin(),
- E = ClassDecl->vbases_end(); I != E; ++I)
- if (I->getType()->getAs<RecordType>()->getDecl() == BaseClassDecl) {
- isBaseVirtual = true;
- break;
- }
+ bool isBaseVirtual = BaseInit->isBaseVirtual();
// The base constructor doesn't construct virtual bases.
if (CtorType == Ctor_Base && isBaseVirtual)
@@ -798,9 +531,10 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
// We can pretend to be a complete class because it only matters for
// virtual bases, and we only do virtual bases for complete ctors.
- llvm::Value *V = ThisPtr;
- V = CGF.GetAddressOfBaseOfCompleteClass(V, isBaseVirtual,
- ClassDecl, BaseClassDecl);
+ llvm::Value *V =
+ CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
+ BaseClassDecl,
+ BaseInit->isBaseVirtual());
CGF.EmitAggExpr(BaseInit->getInit(), V, false, false, true);
@@ -809,7 +543,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
CodeGenFunction::EHCleanupBlock Cleanup(CGF);
CXXDestructorDecl *DD = BaseClassDecl->getDestructor(CGF.getContext());
- CGF.EmitCXXDestructorCall(DD, Dtor_Base, V);
+ CGF.EmitCXXDestructorCall(DD, Dtor_Base, isBaseVirtual, V);
}
}
@@ -850,7 +584,9 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
LHS.isVolatileQualified());
} else {
CGF.EmitAggExpr(MemberInit->getInit(), LHS.getAddress(),
- LHS.isVolatileQualified(), false, true);
+ LHS.isVolatileQualified(),
+ /*IgnoreResult*/ false,
+ /*IsInitializer*/ true);
if (!CGF.Exceptions)
return;
@@ -868,7 +604,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
CXXDestructorDecl *DD = RD->getDestructor(CGF.getContext());
- CGF.EmitCXXDestructorCall(DD, Dtor_Complete, LHS.getAddress());
+ CGF.EmitCXXDestructorCall(DD, Dtor_Complete, /*ForVirtualBase=*/false,
+ LHS.getAddress());
}
}
}
@@ -976,8 +713,6 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> MemberInitializers;
- // FIXME: Add vbase initialization
-
for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
E = CD->init_end();
B != E; ++B) {
@@ -1028,7 +763,8 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// variant, then call the appropriate operator delete() on the way
// out.
if (DtorType == Dtor_Deleting) {
- EmitCXXDestructorCall(Dtor, Dtor_Complete, LoadCXXThis());
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ LoadCXXThis());
SkipBody = true;
// If this is the complete variant, just invoke the base variant;
@@ -1036,7 +772,8 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// this optimization if the body is a function-try-block, because
// we'd introduce *two* handler blocks.
} else if (!isTryBody && DtorType == Dtor_Complete) {
- EmitCXXDestructorCall(Dtor, Dtor_Base, LoadCXXThis());
+ EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+ LoadCXXThis());
SkipBody = true;
// Otherwise, we're in the base variant, so we need to ensure the
@@ -1117,11 +854,11 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
if (BaseClassDecl->hasTrivialDestructor())
continue;
const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
- llvm::Value *V = GetAddressOfBaseOfCompleteClass(LoadCXXThis(),
- true,
- ClassDecl,
- BaseClassDecl);
- EmitCXXDestructorCall(D, Dtor_Base, V);
+ llvm::Value *V =
+ GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(),
+ ClassDecl, BaseClassDecl,
+ /*BaseIsVirtual=*/true);
+ EmitCXXDestructorCall(D, Dtor_Base, /*ForVirtualBase=*/true, V);
}
return;
}
@@ -1175,7 +912,8 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
Array, BaseAddrPtr);
} else
EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()),
- Dtor_Complete, LHS.getAddress());
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ LHS.getAddress());
}
// Destroy non-virtual bases.
@@ -1193,12 +931,14 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
// Ignore trivial destructors.
if (BaseClassDecl->hasTrivialDestructor())
continue;
- const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
-
- llvm::Value *V = GetAddressOfBaseClass(LoadCXXThis(),
- ClassDecl, BaseClassDecl,
- /*NullCheckValue=*/false);
- EmitCXXDestructorCall(D, Dtor_Base, V);
+
+ const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
+ llvm::Value *V =
+ GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(), ClassDecl,
+ BaseClassDecl,
+ /*BaseIsVirtual=*/false);
+
+ EmitCXXDestructorCall(D, Dtor_Base, /*ForVirtualBase=*/false, V);
}
}
@@ -1271,7 +1011,8 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
{
CXXTemporariesCleanupScope Scope(*this);
- EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd);
+ EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address,
+ ArgBeg, ArgEnd);
}
EmitBlock(ContinueBlock);
@@ -1345,7 +1086,7 @@ CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
Counter = Builder.CreateLoad(IndexPtr);
Counter = Builder.CreateSub(Counter, One);
llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
- EmitCXXDestructorCall(D, Dtor_Complete, Address);
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Address);
EmitBlock(ContinueBlock);
@@ -1390,6 +1131,7 @@ CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
FunctionDecl::Static,
+ FunctionDecl::None,
false, true);
StartFunction(FD, R, Fn, Args, SourceLocation());
QualType BaseElementTy = getContext().getBaseElementType(Array);
@@ -1407,7 +1149,7 @@ CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
- CXXCtorType Type,
+ CXXCtorType Type, bool ForVirtualBase,
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd) {
@@ -1429,7 +1171,7 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
return;
}
- llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type));
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type), ForVirtualBase);
llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
@@ -1450,7 +1192,8 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
++I;
// vtt
- if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType))) {
+ if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType),
+ /*ForVirtualBase=*/false)) {
QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
DelegateArgs.push_back(std::make_pair(RValue::get(VTT), VoidPP));
@@ -1502,8 +1245,10 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
+ bool ForVirtualBase,
llvm::Value *This) {
- llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type));
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
+ ForVirtualBase);
llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
@@ -1538,7 +1283,8 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
- bool BaseIsMorallyVirtual,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass) {
const CXXRecordDecl *RD = Base.getBase();
@@ -1548,7 +1294,7 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
// Check if we need to use a vtable from the VTT.
if (CodeGenVTables::needsVTTParameter(CurGD) &&
- (RD->getNumVBases() || BaseIsMorallyVirtual)) {
+ (RD->getNumVBases() || NearestVBase)) {
// Get the secondary vpointer index.
uint64_t VirtualPointerIndex =
CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
@@ -1567,20 +1313,27 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
}
// Compute where to store the address point.
- llvm::Value *VTableField;
+ llvm::Value *VirtualOffset = 0;
+ uint64_t NonVirtualOffset = 0;
- if (CodeGenVTables::needsVTTParameter(CurGD) && BaseIsMorallyVirtual) {
+ if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
// We need to use the virtual base offset offset because the virtual base
// might have a different offset in the most derived class.
- VTableField = GetAddressOfBaseClass(LoadCXXThis(), VTableClass, RD,
- /*NullCheckValue=*/false);
+ VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
+ NearestVBase);
+ NonVirtualOffset = OffsetFromNearestVBase / 8;
} else {
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
-
- VTableField = Builder.CreateBitCast(LoadCXXThis(), Int8PtrTy);
- VTableField =
- Builder.CreateConstInBoundsGEP1_64(VTableField, Base.getBaseOffset() / 8);
+ // We can just use the base offset in the complete class.
+ NonVirtualOffset = Base.getBaseOffset() / 8;
}
+
+ // Apply the offsets.
+ llvm::Value *VTableField = LoadCXXThis();
+
+ if (NonVirtualOffset || VirtualOffset)
+ VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
+ NonVirtualOffset,
+ VirtualOffset);
// Finally, store the address point.
const llvm::Type *AddressPointPtrTy =
@@ -1591,7 +1344,8 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
- bool BaseIsMorallyVirtual,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
bool BaseIsNonVirtualPrimaryBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass,
@@ -1600,7 +1354,8 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
// been set.
if (!BaseIsNonVirtualPrimaryBase) {
// Initialize the vtable pointer for this base.
- InitializeVTablePointer(Base, BaseIsMorallyVirtual, VTable, VTableClass);
+ InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
+ VTable, VTableClass);
}
const CXXRecordDecl *RD = Base.getBase();
@@ -1616,7 +1371,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
continue;
uint64_t BaseOffset;
- bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+ uint64_t BaseOffsetFromNearestVBase;
bool BaseDeclIsNonVirtualPrimaryBase;
if (I->isVirtual()) {
@@ -1628,17 +1383,20 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
getContext().getASTRecordLayout(VTableClass);
BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
- BaseDeclIsMorallyVirtual = true;
+ BaseOffsetFromNearestVBase = 0;
BaseDeclIsNonVirtualPrimaryBase = false;
} else {
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase =
+ OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
}
InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
- BaseDeclIsMorallyVirtual,
+ I->isVirtual() ? BaseDecl : NearestVBase,
+ BaseOffsetFromNearestVBase,
BaseDeclIsNonVirtualPrimaryBase,
VTable, VTableClass, VBases);
}
@@ -1654,8 +1412,8 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
// Initialize the vtable pointers for this class and all of its bases.
VisitedVirtualBasesSetTy VBases;
- InitializeVTablePointers(BaseSubobject(RD, 0),
- /*BaseIsMorallyVirtual=*/false,
+ InitializeVTablePointers(BaseSubobject(RD, 0), /*NearestVBase=*/0,
+ /*OffsetFromNearestVBase=*/0,
/*BaseIsNonVirtualPrimaryBase=*/false,
VTable, RD, VBases);
}
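
ApplyNonVirtualAndVirtualOffset, factored out above, reduces every base-subobject adjustment to one i8* GEP over the sum of two parts: a non-virtual offset known at compile time and, when a virtual base is on the path, an offset fetched from the vtable at runtime. The scalar model below illustrates that arithmetic; the concrete offset values are invented for illustration.

    #include <cstdint>
    #include <cstdio>

    // Scalar model of ApplyNonVirtualAndVirtualOffset: the base subobject
    // lives at this + non-virtual offset (+ virtual offset when a virtual
    // base is involved). Plain pointer arithmetic stands in for the
    // bitcast-to-i8* and the GEP ("add.ptr") emitted in the IR.
    static char *adjustToBase(char *thisPtr, uint64_t nonVirtualOffset,
                              int64_t virtualOffset, bool hasVirtualBase) {
      int64_t offset = static_cast<int64_t>(nonVirtualOffset);
      if (hasVirtualBase)
        offset += virtualOffset;  // Folded like the CreateAdd in the helper.
      return thisPtr + offset;    // The single GEP applied to the i8*.
    }

    int main() {
      char object[64] = {};
      // Suppose the non-virtual steps of the path contribute 8 bytes and
      // the vtable reports the virtual base 32 bytes into this object.
      char *base = adjustToBase(object, 8, 32, /*hasVirtualBase=*/true);
      std::printf("base subobject at offset %td\n", base - object);  // 40
      return 0;
    }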
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index bcbda8a..4963e73 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -320,27 +320,9 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FieldOffset = 0;
FType = CGM.getContext().UnsignedLongTy;
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "reserved", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
- FieldOffset += FieldSize;
- FType = CGM.getContext().UnsignedLongTy;
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "Size", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
-
- FieldOffset += FieldSize;
Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
EltTys.clear();
@@ -360,49 +342,13 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FieldOffset = 0;
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__isa", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
-
- FieldOffset += FieldSize;
- FType = CGM.getContext().IntTy;
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__flags", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
-
- FieldOffset += FieldSize;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
FType = CGM.getContext().IntTy;
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__reserved", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
-
- FieldOffset += FieldSize;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__FuncPtr", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
- FieldOffset += FieldSize;
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
FieldTy = DescTy;
FieldSize = CGM.getContext().getTypeSize(Ty);
@@ -525,13 +471,20 @@ CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+ unsigned Flags = 0;
+ AccessSpecifier Access = I->getAccess();
+ if (Access == clang::AS_private)
+ Flags |= llvm::DIType::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ Flags |= llvm::DIType::FlagProtected;
+
// Create a DW_TAG_member node to remember the offset of this field in the
// struct. FIXME: This is an absolutely insane way to capture this
// information. When we gut debug info, this should be fixed.
FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
FieldName, FieldDefUnit,
FieldLine, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldOffset, Flags, FieldTy);
EltTys.push_back(FieldTy);
}
}
@@ -626,7 +579,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
// It doesn't make sense to give a virtual destructor a vtable index,
// since a single destructor has two entries in the vtable.
if (!isa<CXXDestructorDecl>(Method))
- VIndex = CGM.getVTables().getMethodVtableIndex(Method);
+ VIndex = CGM.getVTables().getMethodVTableIndex(Method);
ContainingType = RecordTy;
}
@@ -734,8 +687,8 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
return VTablePtrType;
}
-/// getVtableName - Get vtable name for the given Class.
-llvm::StringRef CGDebugInfo::getVtableName(const CXXRecordDecl *RD) {
+/// getVTableName - Get vtable name for the given Class.
+llvm::StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
  // Otherwise construct a gdb-compatible name.
std::string Name = "_vptr$" + RD->getNameAsString();
@@ -746,10 +699,10 @@ llvm::StringRef CGDebugInfo::getVtableName(const CXXRecordDecl *RD) {
}
-/// CollectVtableInfo - If the C++ class has vtable info then insert appropriate
+/// CollectVTableInfo - If the C++ class has vtable info then insert appropriate
/// debug info entry in EltTys vector.
void CGDebugInfo::
-CollectVtableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
+CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
@@ -764,7 +717,7 @@ CollectVtableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType VPTR
= DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- getVtableName(RD), Unit,
+ getVTableName(RD), Unit,
0, Size, 0, 0, 0,
getOrCreateVTablePtrType(Unit));
EltTys.push_back(VPTR);
@@ -832,7 +785,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
CollectCXXBases(CXXDecl, Unit, EltTys, FwdDecl);
- CollectVtableInfo(CXXDecl, Unit, EltTys);
+ CollectVTableInfo(CXXDecl, Unit, EltTys);
}
CollectRecordFields(RD, Unit, EltTys);
llvm::MDNode *ContainingType = NULL;
@@ -1293,7 +1246,6 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
case Type::MemberPointer:
return CreateType(cast<MemberPointerType>(Ty), Unit);
- case Type::InjectedClassName:
case Type::TemplateSpecialization:
case Type::Elaborated:
case Type::QualifiedName:
@@ -1318,6 +1270,21 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
return llvm::DIType();
}
+/// CreateMemberType - Create new member and increase Offset by FType's size.
+llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
+ llvm::StringRef Name,
+ uint64_t *Offset) {
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
+ unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
+ llvm::DIType Ty = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
+ Unit, Name, Unit, 0,
+ FieldSize, FieldAlign,
+ *Offset, 0, FieldTy);
+ *Offset += FieldSize;
+ return Ty;
+}
+
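
The win here is mechanical: every synthetic member previously took nine lines (type lookup, size, alignment, CreateDerivedType, push_back, offset bump); the helper folds that into one call and advances the offset itself. A standalone C++ analogue of the pattern (names here are hypothetical, not clang API):

    #include <cstdint>
    #include <string>
    #include <vector>

    struct Member { std::string Name; uint64_t OffsetInBits, SizeInBits; };

    // Analogue of CreateMemberType: describe one member and bump *Offset.
    static Member MakeMember(const std::string &Name, uint64_t SizeInBits,
                             uint64_t *Offset) {
      Member M = {Name, *Offset, SizeInBits};
      *Offset += SizeInBits;
      return M;
    }

    int main() {
      std::vector<Member> EltTys;
      uint64_t FieldOffset = 0;
      EltTys.push_back(MakeMember("__isa", 64, &FieldOffset));
      EltTys.push_back(MakeMember("__flags", 32, &FieldOffset));
      EltTys.push_back(MakeMember("__reserved", 32, &FieldOffset));
      return FieldOffset == 128 ? 0 : 1;  // offsets accumulate in one place
    }
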
/// EmitFunctionStart - Constructs the debug code for entering a function -
/// "llvm.dbg.func.start.".
void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
@@ -1341,17 +1308,15 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
}
}
Name = getFunctionName(FD);
- if (!Name.empty() && Name[0] == '\01')
- Name = Name.substr(1);
// Use mangled name as linkage name for c/c++ functions.
CGM.getMangledName(LinkageName, GD);
} else {
// Use llvm function name as linkage name.
Name = Fn->getName();
LinkageName.setString(Name);
- if (!Name.empty() && Name[0] == '\01')
- Name = Name.substr(1);
}
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
// It is expected that CurLoc is set before using EmitFunctionStart.
// Usually, CurLoc points to the left bracket location of compound
@@ -1437,72 +1402,19 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
FieldOffset = 0;
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__isa", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
- FieldOffset += FieldSize;
-
- FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__forwarding", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
- FieldOffset += FieldSize;
-
- FType = CGM.getContext().IntTy;
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__flags", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
- FieldOffset += FieldSize;
-
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset));
FType = CGM.getContext().IntTy;
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__size", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
- FieldOffset += FieldSize;
-
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
+
bool HasCopyAndDispose = CGM.BlockRequiresCopying(Type);
if (HasCopyAndDispose) {
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__copy_helper", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
- FieldOffset += FieldSize;
-
- FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
- "__destroy_helper", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
- FieldOffset += FieldSize;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
+ &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__destroy_helper",
+ &FieldOffset));
}
CharUnits Align = CGM.getContext().getDeclAlign(VD);
@@ -1517,20 +1429,12 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
llvm::APInt pad(32, NumPaddingBytes);
FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
pad, ArrayType::Normal, 0);
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
- FieldSize = CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
- FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
- Unit, "", Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
- EltTys.push_back(FieldTy);
- FieldOffset += FieldSize;
+ EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
}
}
FType = Type;
- FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
FieldSize = CGM.getContext().getTypeSize(FType);
FieldAlign = Align.getQuantity()*8;
@@ -1558,13 +1462,6 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage, CGBuilderTy &Builder) {
assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
- // Do not emit variable debug information while generating optimized code.
- // The llvm optimizer and code generator are not yet ready to support
- // optimized code debugging.
- const CodeGenOptions &CGO = CGM.getCodeGenOpts();
- if (CGO.OptimizationLevel)
- return;
-
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
llvm::DIType Ty;
uint64_t XOffset = 0;
@@ -1608,11 +1505,7 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
const ValueDecl *VD = BDRE->getDecl();
assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
- // Do not emit variable debug information while generating optimized code.
- // The llvm optimizer and code generator are not yet ready to support
- // optimized code debugging.
- const CodeGenOptions &CGO = CGM.getCodeGenOpts();
- if (CGO.OptimizationLevel || Builder.GetInsertBlock() == 0)
+ if (Builder.GetInsertBlock() == 0)
return;
uint64_t XOffset = 0;
@@ -1708,7 +1601,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
ArrayType::Normal, 0);
}
- llvm::StringRef DeclName = Var->getName();
+ llvm::StringRef DeclName = D->getName();
llvm::DIDescriptor DContext =
getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()), Unit);
DebugFactory.CreateGlobalVariable(DContext, DeclName,
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 8397245..c16379a 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -112,7 +112,7 @@ class CGDebugInfo {
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
llvm::SmallVectorImpl<llvm::DIDescriptor> &E);
- void CollectVtableInfo(const CXXRecordDecl *Decl,
+ void CollectVTableInfo(const CXXRecordDecl *Decl,
llvm::DIFile F,
llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys);
@@ -196,13 +196,17 @@ private:
/// CreateTypeNode - Create type metadata for a source language type.
llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile F);
+ /// CreateMemberType - Create new member and increase Offset by FType's size.
+ llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
+ llvm::StringRef Name, uint64_t *Offset);
+
/// getFunctionName - Get function name for the given FunctionDecl. If the
  /// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
llvm::StringRef getFunctionName(const FunctionDecl *FD);
- /// getVtableName - Get vtable name for the given Class.
- llvm::StringRef getVtableName(const CXXRecordDecl *Decl);
+ /// getVTableName - Get vtable name for the given Class.
+ llvm::StringRef getVTableName(const CXXRecordDecl *Decl);
};
} // namespace CodeGen
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 07d219f..ba3a2b4 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -31,11 +31,44 @@ using namespace CodeGen;
void CodeGenFunction::EmitDecl(const Decl &D) {
switch (D.getKind()) {
- default:
- CGM.ErrorUnsupported(&D, "decl");
- return;
+ case Decl::TranslationUnit:
+ case Decl::Namespace:
+ case Decl::UnresolvedUsingTypename:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::TemplateTypeParm:
+ case Decl::UnresolvedUsingValue:
+ case Decl::NonTypeTemplateParm:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::Field:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
case Decl::ParmVar:
- assert(0 && "Parmdecls should not be in declstmts!");
+ case Decl::ImplicitParam:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::TemplateTemplateParm:
+ case Decl::ObjCMethod:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCProperty:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::LinkageSpec:
+ case Decl::ObjCPropertyImpl:
+ case Decl::ObjCClass:
+ case Decl::ObjCForwardProtocol:
+ case Decl::FileScopeAsm:
+ case Decl::Friend:
+ case Decl::FriendTemplate:
+ case Decl::Block:
+    assert(0 && "Declaration should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
case Decl::Enum: // enum X;
@@ -44,6 +77,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Using: // using X; [C++]
case Decl::UsingShadow:
case Decl::UsingDirective: // using namespace X; [C++]
+ case Decl::NamespaceAlias:
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
// None of these decls require codegen support.
return;
@@ -197,6 +231,9 @@ CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage) {
+ // Bail out early if the block is unreachable.
+ if (!Builder.GetInsertBlock()) return;
+
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
@@ -205,6 +242,8 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
// Store into LocalDeclMap before generating initializer to handle
// circular references.
DMEntry = GV;
+ if (getContext().getLangOptions().CPlusPlus)
+ CGM.setStaticLocalDeclAddress(&D, GV);
// Make sure to evaluate VLA bounds now so that we have them for later.
//
@@ -610,6 +649,11 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
DtorTy = getContext().getBaseElementType(Array);
if (const RecordType *RT = DtorTy->getAs<RecordType>())
if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+
if (!ClassDecl->hasTrivialDestructor()) {
const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
          assert(D && "EmitLocalBlockVarDecl - destructor is null");
@@ -622,7 +666,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
const llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(DeclPtr, BasePtr);
+ Builder.CreateBitCast(Loc, BasePtr);
EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
// Make sure to jump to the exit block.
@@ -634,20 +678,22 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
const llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(DeclPtr, BasePtr);
+ Builder.CreateBitCast(Loc, BasePtr);
EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
}
} else {
{
DelayedCleanupBlock Scope(*this);
- EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
+ Loc);
// Make sure to jump to the exit block.
EmitBranch(Scope.getCleanupExitBlock());
}
if (Exceptions) {
EHCleanupBlock Cleanup(*this);
- EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
+ Loc);
}
}
}
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 40c18ca..f6c805f 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -219,9 +219,14 @@ void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
SourceLocation());
// Emit the dtors, in reverse order from construction.
- for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i)
- Builder.CreateCall(DtorsAndObjects[e - i - 1].first,
- DtorsAndObjects[e - i - 1].second);
+ for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
+ llvm::Constant *Callee = DtorsAndObjects[e - i - 1].first;
+ llvm::CallInst *CI = Builder.CreateCall(Callee,
+ DtorsAndObjects[e - i - 1].second);
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
+ CI->setCallingConv(F->getCallingConv());
+ }
FinishFunction();
}
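
The mismatch this guards against is easiest to provoke on x86, where a flag such as -mrtd gives functions a non-default convention; a call whose convention disagrees with its callee is undefined at the IR level. A hedged source-level sketch:

    // Sketch: a global whose teardown runs through the generated dtor
    // function. If the dtor stub carries, say, x86_stdcallcc, the call
    // emitted in GenerateCXXGlobalDtorFunc must carry it too.
    struct Global {
      ~Global();  // destroyed at program exit via the registered dtor list
    };
    Global g;
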
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 1e15066..c1d05bf 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -122,82 +122,71 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "_ZSt9terminatev");
}
-// CopyObject - Utility to copy an object. Calls copy constructor as necessary.
-// DestPtr is casted to the right type.
-static void CopyObject(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *DestPtr, llvm::Value *ExceptionPtrPtr) {
- QualType ObjectType = E->getType();
-
- // Store the throw exception in the exception object.
- if (!CGF.hasAggregateLLVMType(ObjectType)) {
- llvm::Value *Value = CGF.EmitScalarExpr(E);
- const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo();
-
- CGF.Builder.CreateStore(Value,
- CGF.Builder.CreateBitCast(DestPtr, ValuePtrTy));
- } else {
- const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo();
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
-
- llvm::Value *This = CGF.Builder.CreateBitCast(DestPtr, Ty);
- if (RD->hasTrivialCopyConstructor()) {
- CGF.EmitAggExpr(E, This, false);
- } else if (CXXConstructorDecl *CopyCtor
- = RD->getCopyConstructor(CGF.getContext(), 0)) {
- llvm::Value *CondPtr = 0;
- if (CGF.Exceptions) {
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
- llvm::Constant *FreeExceptionFn = getFreeExceptionFn(CGF);
-
- llvm::BasicBlock *CondBlock = CGF.createBasicBlock("cond.free");
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
- CondPtr = CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
- "doEHfree");
-
- CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CondPtr),
- CondBlock, Cont);
- CGF.EmitBlock(CondBlock);
-
- // Load the exception pointer.
- llvm::Value *ExceptionPtr = CGF.Builder.CreateLoad(ExceptionPtrPtr);
- CGF.Builder.CreateCall(FreeExceptionFn, ExceptionPtr);
-
- CGF.EmitBlock(Cont);
- }
-
- if (CondPtr)
- CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
- CondPtr);
-
- llvm::Value *Src = CGF.EmitLValue(E).getAddress();
-
- if (CondPtr)
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
- CondPtr);
-
- llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
- llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
- CGF.setInvokeDest(TerminateHandler);
-
- // Stolen from EmitClassAggrMemberwiseCopy
- llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
- Ctor_Complete);
- CallArgList CallArgs;
- CallArgs.push_back(std::make_pair(RValue::get(This),
- CopyCtor->getThisType(CGF.getContext())));
-
- // Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- CopyCtor->getParamDecl(0)->getType()));
- const FunctionProtoType *FPT
- = CopyCtor->getType()->getAs<FunctionProtoType>();
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, CopyCtor);
- CGF.setInvokeDest(PrevLandingPad);
- } else
- llvm_unreachable("uncopyable object");
+// Emits an exception expression into the given location. This
+// differs from EmitAnyExprToMem only in that, if a final copy-ctor
+// call is required, an exception within that copy ctor causes
+// std::terminate to be invoked.
+static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
+ llvm::Value *ExnLoc) {
+ // We want to release the allocated exception object if this
+ // expression throws. We do this by pushing an EH-only cleanup
+ // block which, furthermore, deactivates itself after the expression
+ // is complete.
+ llvm::AllocaInst *ShouldFreeVar =
+ CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
+ "should-free-exnobj.var");
+ CGF.InitTempAlloca(ShouldFreeVar,
+ llvm::ConstantInt::getFalse(CGF.getLLVMContext()));
+
+ // A variable holding the exception pointer. This is necessary
+ // because the throw expression does not necessarily dominate the
+ // cleanup, for example if it appears in a conditional expression.
+ llvm::AllocaInst *ExnLocVar =
+ CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
+
+ llvm::BasicBlock *SavedInvokeDest = CGF.getInvokeDest();
+ {
+ CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
+
+ llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
+ "should-free-exnobj");
+ CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
+ CGF.EmitBlock(FreeBB);
+ llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal);
+ CGF.EmitBlock(DoneBB);
}
+ llvm::BasicBlock *Cleanup = CGF.getInvokeDest();
+
+ CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
+ ShouldFreeVar);
+
+ // __cxa_allocate_exception returns a void*; we need to cast this
+ // to the appropriate type for the object.
+ const llvm::Type *Ty = CGF.ConvertType(E->getType())->getPointerTo();
+ llvm::Value *TypedExnLoc = CGF.Builder.CreateBitCast(ExnLoc, Ty);
+
+ // FIXME: this isn't quite right! If there's a final unelided call
+ // to a copy constructor, then according to [except.terminate]p1 we
+ // must call std::terminate() if that constructor throws, because
+ // technically that copy occurs after the exception expression is
+ // evaluated but before the exception is caught. But the best way
+ // to handle that is to teach EmitAggExpr to do the final copy
+ // differently if it can't be elided.
+ CGF.EmitAnyExprToMem(E, TypedExnLoc, /*Volatile*/ false);
+
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
+ ShouldFreeVar);
+
+ // Pop the cleanup block if it's still the top of the cleanup stack.
+ // Otherwise, temporaries have been created and our cleanup will get
+ // properly removed in time.
+ // TODO: this is not very resilient.
+ if (CGF.getInvokeDest() == Cleanup)
+ CGF.setInvokeDest(SavedInvokeDest);
}
// CopyObject - Utility to copy an object. Calls copy constructor as necessary.
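
At the source level, the new cleanup covers throws that escape while the exception object is being constructed in place: the already-allocated buffer must be freed rather than leaked. A standalone illustration (behavior per the Itanium EH ABI; the Payload type is hypothetical):

    #include <cstdio>

    struct Payload {
      explicit Payload(int n) {
        if (n < 0)
          throw "bad payload";  // escapes mid-construction
      }
    };

    void f(int n) {
      // If Payload's constructor throws here, the buffer obtained from
      // __cxa_allocate_exception is released by the EH-only cleanup above.
      throw Payload(n);
    }

    int main() {
      try {
        f(-1);
      } catch (const char *msg) {
        std::printf("caught: %s\n", msg);  // the inner exception propagates
      }
    }
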
@@ -270,7 +259,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
// Now allocate the exception object.
const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
- uint64_t TypeSize = getContext().getTypeSize(ThrowType) / 8;
+ uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
llvm::Value *ExceptionPtr =
@@ -278,17 +267,24 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
llvm::ConstantInt::get(SizeTy, TypeSize),
"exception");
- llvm::Value *ExceptionPtrPtr =
- CreateTempAlloca(ExceptionPtr->getType(), "exception.ptr");
- Builder.CreateStore(ExceptionPtr, ExceptionPtrPtr);
-
-
- CopyObject(*this, E->getSubExpr(), ExceptionPtr, ExceptionPtrPtr);
+ EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
// Now throw the exception.
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
- llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType);
- llvm::Constant *Dtor = llvm::Constant::getNullValue(Int8PtrTy);
+ llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, true);
+
+ // The address of the destructor. If the exception type has a
+ // trivial destructor (or isn't a record), we just pass null.
+ llvm::Constant *Dtor = 0;
+ if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!Record->hasTrivialDestructor()) {
+ CXXDestructorDecl *DtorD = Record->getDestructor(getContext());
+ Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
+ Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
+ }
+ }
+ if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy);
if (getInvokeDest()) {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
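
Observable effect of passing the real destructor (the third argument of __cxa_throw in the Itanium ABI) instead of null: the runtime now destroys non-trivially-destructible exception objects when the handler releases them. Standalone check:

    #include <cstdio>

    struct Noisy {
      ~Noisy() { std::puts("~Noisy"); }  // non-trivial destructor
    };

    int main() {
      try {
        throw Noisy();  // codegen registers ~Noisy with __cxa_throw
      } catch (const Noisy &) {
        // leaving the handler lets the runtime run the registered dtor
      }
      std::puts("after catch");  // any "~Noisy" lines print before this one
    }
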
@@ -375,7 +371,7 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
QualType Ty = Proto->getExceptionType(i);
QualType ExceptType
= Ty.getNonReferenceType().getUnqualifiedType();
- llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType);
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
SelectorArgs.push_back(EHType);
}
if (Proto->getNumExceptions())
@@ -498,7 +494,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
// are ignored.
QualType CaughtType = C->getCaughtType().getNonReferenceType();
llvm::Value *EHTypeInfo
- = CGM.GetAddrOfRTTIDescriptor(CaughtType.getUnqualifiedType());
+ = CGM.GetAddrOfRTTIDescriptor(CaughtType.getUnqualifiedType(), true);
SelectorArgs.push_back(EHTypeInfo);
} else {
// null indicates catch all
@@ -649,38 +645,46 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
}
CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
- llvm::BasicBlock *Cont1 = CGF.createBasicBlock("cont");
- CGF.EmitBranch(Cont1);
CGF.setInvokeDest(PreviousInvokeDest);
+ llvm::BasicBlock *EndOfCleanup = CGF.Builder.GetInsertBlock();
- CGF.EmitBlock(CleanupHandler);
-
+ // Jump to the beginning of the cleanup.
+ CGF.Builder.SetInsertPoint(CleanupHandler, CleanupHandler->begin());
+
+ // The libstdc++ personality function.
+ // TODO: generalize to work with other libraries.
llvm::Constant *Personality =
CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
(CGF.VMContext),
true),
"__gxx_personality_v0");
Personality = llvm::ConstantExpr::getBitCast(Personality, CGF.PtrToInt8Ty);
+
+ // %exception = call i8* @llvm.eh.exception()
+  // Magic intrinsic which gives us a handle to the caught
+ // exception.
llvm::Value *llvm_eh_exception =
CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
-
llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- const llvm::IntegerType *Int8Ty;
- const llvm::PointerType *PtrToInt8Ty;
- Int8Ty = llvm::Type::getInt8Ty(CGF.VMContext);
- // C string type. Used in lots of places.
- PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+
+ llvm::Constant *Null = llvm::ConstantPointerNull::get(CGF.PtrToInt8Ty);
+
+ // %ignored = call i32 @llvm.eh.selector(i8* %exception,
+ // i8* @__gxx_personality_v0,
+ // i8* null)
+ // Magic intrinsic which tells LLVM that this invoke landing pad is
+ // just a cleanup block.
llvm::Value *Args[] = { Exc, Personality, Null };
+ llvm::Value *llvm_eh_selector =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
CGF.Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
- CGF.EmitBlock(CleanupEntryBB);
-
- CGF.EmitBlock(Cont1);
+ // And then we fall through into the code that the user put there.
+ // Jump back to the end of the cleanup.
+ CGF.Builder.SetInsertPoint(EndOfCleanup);
+ // Rethrow the exception.
if (CGF.getInvokeDest()) {
llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
CGF.Builder.CreateInvoke(getUnwindResumeOrRethrowFn(CGF), Cont,
@@ -688,10 +692,15 @@ CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
CGF.EmitBlock(Cont);
} else
CGF.Builder.CreateCall(getUnwindResumeOrRethrowFn(CGF), Exc);
-
CGF.Builder.CreateUnreachable();
- CGF.EmitBlock(Cont);
+ // Resume inserting where we started, but put the new cleanup
+ // handler in place.
+ if (PreviousInsertionBlock)
+ CGF.Builder.SetInsertPoint(PreviousInsertionBlock);
+ else
+ CGF.Builder.ClearInsertionPoint();
+
if (CGF.Exceptions)
CGF.setInvokeDest(CleanupHandler);
}
@@ -700,12 +709,11 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
if (TerminateHandler)
return TerminateHandler;
- llvm::BasicBlock *Cont = 0;
-
- if (HaveInsertPoint()) {
- Cont = createBasicBlock("cont");
- EmitBranch(Cont);
- }
+ // We don't want to change anything at the current location, so
+ // save it aside and clear the insert point.
+ llvm::BasicBlock *SavedInsertBlock = Builder.GetInsertBlock();
+ llvm::BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
+ Builder.ClearInsertionPoint();
llvm::Constant *Personality =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
@@ -735,11 +743,8 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
TerminateCall->setDoesNotThrow();
Builder.CreateUnreachable();
- // Clear the insertion point to indicate we are in unreachable code.
- Builder.ClearInsertionPoint();
-
- if (Cont)
- EmitBlock(Cont);
+ // Restore the saved insertion state.
+ Builder.SetInsertPoint(SavedInsertBlock, SavedInsertPoint);
return TerminateHandler;
}
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 0aa4438..9ade916 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -37,6 +37,13 @@ llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}
+void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
+ llvm::Value *Init) {
+ llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
+ llvm::BasicBlock *Block = AllocaInsertPt->getParent();
+ Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
+}
+
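
The point of the new helper is placement: the store lands right after the alloca in the entry block, so the initial value dominates every later load even when the current insertion point is inside a conditional. Its first client is the exception path above:

    // From EmitAnyExprToExn: the "should free" flag must read false on
    // every path that reaches the cleanup, so it is initialized with the
    // allocas rather than at the current insertion point.
    llvm::AllocaInst *ShouldFreeVar =
        CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
                             "should-free-exnobj.var");
    CGF.InitTempAlloca(ShouldFreeVar,
                       llvm::ConstantInt::getFalse(CGF.getLLVMContext()));
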
llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
const llvm::Twine &Name) {
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
@@ -111,6 +118,23 @@ RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
IsInitializer);
}
+/// EmitAnyExprToMem - Evaluate an expression into a given memory
+/// location.
+void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
+ llvm::Value *Location,
+ bool IsLocationVolatile,
+ bool IsInit) {
+ if (E->getType()->isComplexType())
+ EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
+ else if (hasAggregateLLVMType(E->getType()))
+ EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit);
+ else {
+ RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
+ LValue LV = LValue::MakeAddr(Location, MakeQualifiers(E->getType()));
+ EmitStoreThroughLValue(RV, LV, E->getType());
+ }
+}
+
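
This gives one entry point for "evaluate E into this address" regardless of evaluation kind; the compound-literal and throw paths in this patch both collapse onto it:

    // From EmitCompoundLiteralLValue later in this patch: one call
    // replaces the open-coded complex/aggregate/scalar dispatch.
    EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);
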
RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
bool IsInitializer) {
bool ShouldDestroyTemporaries = false;
@@ -150,16 +174,15 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
PopCXXTemporary();
}
} else {
- const CXXRecordDecl *BaseClassDecl = 0;
+ const CXXBaseSpecifierArray *BasePath = 0;
const CXXRecordDecl *DerivedClassDecl = 0;
if (const CastExpr *CE =
dyn_cast<CastExpr>(E->IgnoreParenNoopCasts(getContext()))) {
if (CE->getCastKind() == CastExpr::CK_DerivedToBase) {
E = CE->getSubExpr();
-
- BaseClassDecl =
- cast<CXXRecordDecl>(CE->getType()->getAs<RecordType>()->getDecl());
+
+ BasePath = &CE->getBasePath();
DerivedClassDecl =
cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
}
@@ -185,6 +208,7 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
{
DelayedCleanupBlock Scope(*this);
EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false,
Val.getAggregateAddr());
// Make sure to jump to the exit block.
@@ -193,6 +217,7 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
if (Exceptions) {
EHCleanupBlock Cleanup(*this);
EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false,
Val.getAggregateAddr());
}
}
@@ -201,10 +226,10 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
}
// Check if need to perform the derived-to-base cast.
- if (BaseClassDecl) {
+ if (BasePath) {
llvm::Value *Derived = Val.getAggregateAddr();
llvm::Value *Base =
- GetAddressOfBaseClass(Derived, DerivedClassDecl, BaseClassDecl,
+ GetAddressOfBaseClass(Derived, DerivedClassDecl, *BasePath,
/*NullCheckValue=*/false);
return RValue::get(Base);
}
@@ -240,18 +265,15 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
if (!CatchUndefined)
return;
- const llvm::IntegerType *Size_tTy
+ const llvm::Type *Size_tTy
= llvm::IntegerType::get(VMContext, LLVMPointerWidth);
Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
- const llvm::Type *ResType[] = {
- Size_tTy
- };
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, ResType, 1);
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
- CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &Size_tTy, 1);
+ const llvm::IntegerType *Int1Ty = llvm::IntegerType::get(VMContext, 1);
+
// In time, people may want to control this and use a 1 here.
- llvm::Value *Arg = llvm::ConstantInt::get(IntTy, 0);
+ llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
llvm::BasicBlock *Cont = createBasicBlock();
llvm::BasicBlock *Check = createBasicBlock();
@@ -457,6 +479,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
case Expr::BinaryOperatorClass:
return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+ case Expr::CompoundAssignOperatorClass:
+ return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
@@ -606,63 +630,73 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
QualType ExprType) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
- unsigned StartBit = Info.Start;
- unsigned BitfieldSize = Info.Size;
- llvm::Value *Ptr = LV.getBitFieldAddr();
- const llvm::Type *EltTy =
- cast<llvm::PointerType>(Ptr->getType())->getElementType();
- unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
+ // Get the output type.
+ const llvm::Type *ResLTy = ConvertType(ExprType);
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
- // In some cases the bitfield may straddle two memory locations. Currently we
- // load the entire bitfield, then do the magic to sign-extend it if
- // necessary. This results in somewhat more code than necessary for the common
- // case (one load), since two shifts accomplish both the masking and sign
- // extension.
- unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
- llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");
+ // Compute the result as an OR of all of the individual component accesses.
+ llvm::Value *Res = 0;
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
- // Shift to proper location.
- if (StartBit)
- Val = Builder.CreateLShr(Val, StartBit, "bf.lo");
+ // Get the field pointer.
+ llvm::Value *Ptr = LV.getBitFieldBaseAddr();
- // Mask off unused bits.
- llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext,
- llvm::APInt::getLowBitsSet(EltTySize, LowBits));
- Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
- // Fetch the high bits if necessary.
- if (LowBits < BitfieldSize) {
- unsigned HighBits = BitfieldSize - LowBits;
- llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
- llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
- LV.isVolatileQualified(),
- "tmp");
+ // Offset by the byte offset, if used.
+ if (AI.FieldByteOffset) {
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
+ }
- // Mask off unused bits.
- llvm::Constant *HighMask = llvm::ConstantInt::get(VMContext,
- llvm::APInt::getLowBitsSet(EltTySize, HighBits));
- HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");
+ // Cast to the access type.
+ const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
+ ExprType.getAddressSpace());
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
- // Shift to proper location and or in to bitfield value.
- HighVal = Builder.CreateShl(HighVal, LowBits);
- Val = Builder.CreateOr(Val, HighVal, "bf.val");
- }
+ // Perform the load.
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Load->setAlignment(AI.AccessAlignment);
- // Sign extend if necessary.
- if (Info.IsSigned) {
- llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
- EltTySize - BitfieldSize);
- Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
- ExtraBits, "bf.val.sext");
+ // Shift out unused low bits and mask out unused high bits.
+ llvm::Value *Val = Load;
+ if (AI.FieldBitStart)
+ Val = Builder.CreateLShr(Load, AI.FieldBitStart);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
+ AI.TargetBitWidth),
+ "bf.clear");
+
+ // Extend or truncate to the target size.
+ if (AI.AccessWidth < ResSizeInBits)
+ Val = Builder.CreateZExt(Val, ResLTy);
+ else if (AI.AccessWidth > ResSizeInBits)
+ Val = Builder.CreateTrunc(Val, ResLTy);
+
+ // Shift into place, and OR into the result.
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateShl(Val, AI.TargetBitOffset);
+ Res = Res ? Builder.CreateOr(Res, Val) : Val;
}
- // The bitfield type and the normal type differ when the storage sizes differ
- // (currently just _Bool).
- Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");
+ // If the bit-field is signed, perform the sign-extension.
+ //
+ // FIXME: This can easily be folded into the load of the high bits, which
+ // could also eliminate the mask of high bits in some situations.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
+ ExtraBits, "bf.val.sext");
+ }
- return RValue::get(Val);
+ return RValue::get(Res);
}
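
What changes for straddling fields: instead of one wide load plus a speculative second load, the record layout now describes each access component, and the value is assembled by OR-ing the masked pieces. A standalone case where a field crosses storage units (the exact layout is ABI-dependent):

    #include <cassert>

    struct S {
      unsigned a : 3;
      unsigned b : 30;  // crosses the first 32-bit unit on common ABIs
      unsigned c : 31;
    };

    int main() {
      S s = {};
      s.b = 0x2AAAAAAA;           // written across two storage units
      assert(s.b == 0x2AAAAAAA);  // read back as two masked loads, OR'd
      assert(s.a == 0 && s.c == 0);  // neighbors untouched
      return 0;
    }
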
RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
@@ -783,88 +817,103 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
QualType Ty,
llvm::Value **Result) {
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
- unsigned StartBit = Info.Start;
- unsigned BitfieldSize = Info.Size;
- llvm::Value *Ptr = Dst.getBitFieldAddr();
- const llvm::Type *EltTy =
- cast<llvm::PointerType>(Ptr->getType())->getElementType();
- unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
+ // Get the output type.
+ const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
- // Get the new value, cast to the appropriate type and masked to exactly the
- // size of the bit-field.
+ // Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
- llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
- llvm::Constant *Mask = llvm::ConstantInt::get(VMContext,
- llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
- NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");
+
+ if (Ty->isBooleanType())
+ SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
+
+ SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ Info.getSize()),
+ "bf.value");
// Return the new value of the bit-field, if requested.
if (Result) {
// Cast back to the proper type for result.
- const llvm::Type *SrcTy = SrcVal->getType();
- llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
- "bf.reload.val");
+ const llvm::Type *SrcTy = Src.getScalarVal()->getType();
+ llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
+ "bf.reload.val");
// Sign extend if necessary.
- if (Info.IsSigned) {
- unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
- llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
- SrcTySize - BitfieldSize);
- SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
- ExtraBits, "bf.reload.sext");
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
+ ExtraBits, "bf.reload.sext");
}
- *Result = SrcTrunc;
+ *Result = ReloadVal;
}
- // In some cases the bitfield may straddle two memory locations. Emit the low
- // part first and check to see if the high needs to be done.
- unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
- llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
- "bf.prev.low");
-
- // Compute the mask for zero-ing the low part of this bitfield.
- llvm::Constant *InvMask =
- llvm::ConstantInt::get(VMContext,
- ~llvm::APInt::getBitsSet(EltTySize, StartBit, StartBit + LowBits));
+ // Iterate over the components, writing each piece to memory.
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
- // Compute the new low part as
- // LowVal = (LowVal & InvMask) | (NewVal << StartBit),
- // with the shift of NewVal implicitly stripping the high bits.
- llvm::Value *NewLowVal =
- Builder.CreateShl(NewVal, StartBit, "bf.value.lo");
- LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
- LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");
+ // Get the field pointer.
+ llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
- // Write back.
- Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
- // If the low part doesn't cover the bitfield emit a high part.
- if (LowBits < BitfieldSize) {
- unsigned HighBits = BitfieldSize - LowBits;
- llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
- llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
- Dst.isVolatileQualified(),
- "bf.prev.hi");
-
- // Compute the mask for zero-ing the high part of this bitfield.
- llvm::Constant *InvMask =
- llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize,
- HighBits));
+ // Offset by the byte offset, if used.
+ if (AI.FieldByteOffset) {
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ Ptr = Builder.CreateBitCast(Ptr, i8PTy);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
+ }
- // Compute the new high part as
- // HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
- // where the high bits of NewVal have already been cleared and the
- // shift stripping the low bits.
- llvm::Value *NewHighVal =
- Builder.CreateLShr(NewVal, LowBits, "bf.value.high");
- HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
- HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");
+ // Cast to the access type.
+ const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
+ Ty.getAddressSpace());
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Extract the piece of the bit-field value to write in this access, limited
+ // to the values that are part of this access.
+ llvm::Value *Val = SrcVal;
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ AI.TargetBitWidth));
+
+ // Extend or truncate to the access size.
+ const llvm::Type *AccessLTy =
+ llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
+ if (ResSizeInBits < AI.AccessWidth)
+ Val = Builder.CreateZExt(Val, AccessLTy);
+ else if (ResSizeInBits > AI.AccessWidth)
+ Val = Builder.CreateTrunc(Val, AccessLTy);
+
+ // Shift into the position in memory.
+ if (AI.FieldBitStart)
+ Val = Builder.CreateShl(Val, AI.FieldBitStart);
+
+ // If necessary, load and OR in bits that are outside of the bit-field.
+ if (AI.TargetBitWidth != AI.AccessWidth) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Load->setAlignment(AI.AccessAlignment);
+
+ // Compute the mask for zeroing the bits that are part of the bit-field.
+ llvm::APInt InvMask =
+ ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
+ AI.FieldBitStart + AI.TargetBitWidth);
+
+ // Apply the mask and OR in to the value to write.
+ Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
+ }
- // Write back.
- Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
+ // Write the value.
+ llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
+ Dst.isVolatileQualified());
+ if (AI.AccessAlignment)
+ Store->setAlignment(AI.AccessAlignment);
}
}
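
Each component's store is a read-modify-write: load the unit, clear the field's bits with an inverse mask, OR in the shifted value, and store back. The masking arithmetic in miniature:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned Start = 5, Width = 9;           // field at bits [5, 14)
      uint32_t Unit = 0xFFFFFFFFu;                   // memory before the store
      uint32_t Val  = 0x1ABu & ((1u << Width) - 1);  // truncate to field width

      uint32_t Mask    = ((1u << Width) - 1) << Start;
      uint32_t Updated = (Unit & ~Mask) | (Val << Start);

      assert(((Updated >> Start) & ((1u << Width) - 1)) == Val);  // field set
      assert((Updated | Mask) == 0xFFFFFFFFu);  // bits outside it preserved
      return 0;
    }
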
@@ -1084,6 +1133,9 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
llvm::Value *V = LocalDeclMap[VD];
+ if (!V && getContext().getLangOptions().CPlusPlus &&
+ VD->isStaticLocal())
+ V = CGM.getStaticLocalDeclAddress(VD);
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
Qualifiers Quals = MakeQualifiers(E->getType());
@@ -1474,19 +1526,7 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(Field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
-
- // FIXME: CodeGenTypes should expose a method to get the appropriate type for
- // FieldTy (the appropriate type is ABI-dependent).
- const llvm::Type *FieldTy =
- CGM.getTypes().ConvertTypeForMem(Field->getType());
- const llvm::PointerType *BaseTy =
- cast<llvm::PointerType>(BaseValue->getType());
- unsigned AS = BaseTy->getAddressSpace();
- BaseValue = Builder.CreateBitCast(BaseValue,
- llvm::PointerType::get(FieldTy, AS));
- llvm::Value *V = Builder.CreateConstGEP1_32(BaseValue, Info.FieldNo);
-
- return LValue::MakeBitfield(V, Info,
+ return LValue::MakeBitfield(BaseValue, Info,
Field->getType().getCVRQualifiers()|CVRQualifiers);
}
@@ -1548,12 +1588,7 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
const Expr* InitExpr = E->getInitializer();
LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));
- if (E->getType()->isComplexType())
- EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
- else if (hasAggregateLLVMType(E->getType()))
- EmitAnyExpr(InitExpr, DeclPtr, false);
- else
- EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
+ EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);
return Result;
}
@@ -1647,27 +1682,19 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
E->getSubExpr()->getType()->getAs<RecordType>();
CXXRecordDecl *DerivedClassDecl =
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
-
- const RecordType *BaseClassTy = E->getType()->getAs<RecordType>();
- CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
// Perform the derived-to-base conversion
llvm::Value *Base =
GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl,
- BaseClassDecl, /*NullCheckValue=*/false);
+ E->getBasePath(), /*NullCheckValue=*/false);
return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
}
case CastExpr::CK_ToUnion:
return EmitAggExprToLValue(E);
case CastExpr::CK_BaseToDerived: {
- const RecordType *BaseClassTy =
- E->getSubExpr()->getType()->getAs<RecordType>();
- CXXRecordDecl *BaseClassDecl =
- cast<CXXRecordDecl>(BaseClassTy->getDecl());
-
const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
CXXRecordDecl *DerivedClassDecl =
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
@@ -1676,8 +1703,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// Perform the base-to-derived conversion
llvm::Value *Derived =
- GetAddressOfDerivedClass(LV.getAddress(), BaseClassDecl,
- DerivedClassDecl, /*NullCheckValue=*/false);
+ GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
+ E->getBasePath(),/*NullCheckValue=*/false);
return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
}
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 4d5160f..d1b0dff 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -263,7 +263,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
std::swap(DerivedDecl, BaseDecl);
if (llvm::Constant *Adj =
- CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, BaseDecl)) {
+ CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, E->getBasePath())) {
if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
SrcAdj = Builder.CreateSub(SrcAdj, Adj, "adj");
else
@@ -321,6 +321,11 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
(void) MPT;
assert(MPT->getPointeeType()->isFunctionProtoType() &&
"Unexpected member pointer type!");
+
+ // The creation of member function pointers has no side effects; if
+ // there is no destination pointer, we have nothing to do.
+ if (!DestPtr)
+ return;
const DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
const CXXMethodDecl *MD =
@@ -329,17 +334,23 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
const llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
llvm::Value *FuncPtr;
if (MD->isVirtual()) {
- int64_t Index = CGF.CGM.getVTables().getMethodVtableIndex(MD);
+ int64_t Index = CGF.CGM.getVTables().getMethodVTableIndex(MD);
+ // FIXME: We shouldn't use / 8 here.
+ uint64_t PointerWidthInBytes =
+ CGF.CGM.getContext().Target.getPointerWidth(0) / 8;
+
// Itanium C++ ABI 2.3:
// For a non-virtual function, this field is a simple function pointer.
// For a virtual function, it is 1 plus the virtual table offset
// (in bytes) of the function, represented as a ptrdiff_t.
- FuncPtr = llvm::ConstantInt::get(PtrDiffTy, (Index * 8) + 1);
+ FuncPtr = llvm::ConstantInt::get(PtrDiffTy,
+ (Index * PointerWidthInBytes) + 1);
} else {
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
const llvm::Type *Ty =
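
Worked example of the encoding cited above: on a 64-bit target, a virtual function in vtable slot 2 sits at byte offset 16, so the member-pointer field holds 17; the fix replaces the hard-coded 8 with the target's pointer width.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t Index = 2;     // slot in the vtable
      const uint64_t PtrBytes = 8;  // e.g. a 64-bit target
      int64_t FuncPtrField = Index * PtrBytes + 1;
      assert(FuncPtrField == 17);
      assert(FuncPtrField & 1);     // the +1 marks the entry as virtual
      return 0;
    }
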
@@ -738,6 +749,14 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+ // Ignore empty classes in C++.
+ if (getContext().getLangOptions().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ return;
+ }
+ }
+
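
Rationale sketch: an empty class still has sizeof >= 1, but its byte carries no value, and copying it can even clobber storage it shares with another subobject under the empty-base optimization. Illustration:

    struct Empty {};

    struct Derived : Empty {
      char c;  // may share the address of the Empty base subobject
    };

    void assign(Empty &dst, const Empty &src) {
      dst = src;  // with this change, no memcpy of the placeholder byte
    }
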
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
// C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 1fd1da8..b57cdc9 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -205,20 +205,20 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
EmitBlock(FnVirtual);
- const llvm::Type *VtableTy =
+ const llvm::Type *VTableTy =
FTy->getPointerTo()->getPointerTo();
- llvm::Value *Vtable = Builder.CreateBitCast(This, VtableTy->getPointerTo());
- Vtable = Builder.CreateLoad(Vtable);
+ llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
+ VTable = Builder.CreateLoad(VTable);
- Vtable = Builder.CreateBitCast(Vtable, Int8PtrTy);
- llvm::Value *VtableOffset =
+ VTable = Builder.CreateBitCast(VTable, Int8PtrTy);
+ llvm::Value *VTableOffset =
Builder.CreateSub(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1));
- Vtable = Builder.CreateGEP(Vtable, VtableOffset, "fn");
- Vtable = Builder.CreateBitCast(Vtable, VtableTy);
+ VTable = Builder.CreateGEP(VTable, VTableOffset, "fn");
+ VTable = Builder.CreateBitCast(VTable, VTableTy);
- llvm::Value *VirtualFn = Builder.CreateLoad(Vtable, "virtualfn");
+ llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "virtualfn");
EmitBranch(FnEnd);
EmitBlock(FnNonVirtual);
@@ -316,17 +316,22 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
const llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Dest, BasePtr);
+ Builder.CreateBitCast(Dest, BasePtr);
EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
E->arg_begin(), E->arg_end());
}
- else
+ else {
+ CXXCtorType Type =
+ (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
+ ? Ctor_Complete : Ctor_Base;
+ bool ForVirtualBase =
+ E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
+
// Call the constructor.
- EmitCXXConstructorCall(CD,
- E->isBaseInitialization()? Ctor_Base : Ctor_Complete,
- Dest,
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
E->arg_begin(), E->arg_end());
+ }
}
static CharUnits CalculateCookiePadding(ASTContext &Ctx, QualType ElementType) {
@@ -460,18 +465,20 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *NewPtr,
llvm::Value *NumElements) {
if (E->isArray()) {
- if (CXXConstructorDecl *Ctor = E->getConstructor())
- CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
- E->constructor_arg_begin(),
- E->constructor_arg_end());
- return;
+ if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ if (!Ctor->getParent()->hasTrivialConstructor())
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
+ E->constructor_arg_begin(),
+ E->constructor_arg_end());
+ return;
+ }
}
QualType AllocType = E->getAllocatedType();
if (CXXConstructorDecl *Ctor = E->getConstructor()) {
- CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
- E->constructor_arg_begin(),
+ CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
+ NewPtr, E->constructor_arg_begin(),
E->constructor_arg_end());
return;
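
Effect of the added triviality check: array new of a trivially-default-constructible class no longer emits a per-element constructor loop. For instance:

    struct Trivial { int x; };  // implicit, trivial default constructor

    Trivial *make(unsigned n) {
      return new Trivial[n];    // allocation only; no construction loop
    }
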
@@ -760,7 +767,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
// The dtor took care of deleting the object.
ShouldCallDelete = false;
} else
- EmitCXXDestructorCall(Dtor, Dtor_Complete, Ptr);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ Ptr);
}
}
}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 172a77d..2595ff0 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -27,359 +27,387 @@
using namespace clang;
using namespace CodeGen;
-namespace {
+//===----------------------------------------------------------------------===//
+// ConstStructBuilder
+//===----------------------------------------------------------------------===//
+
+namespace {
class ConstStructBuilder {
CodeGenModule &CGM;
CodeGenFunction *CGF;
bool Packed;
-
unsigned NextFieldOffsetInBytes;
-
unsigned LLVMStructAlignment;
-
std::vector<llvm::Constant *> Elements;
-
+public:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ InitListExpr *ILE);
+
+private:
ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
: CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
LLVMStructAlignment(1) { }
bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
- const Expr *InitExpr) {
- uint64_t FieldOffsetInBytes = FieldOffset / 8;
+ llvm::Constant *InitExpr);
- assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
- && "Field offset mismatch!");
+ bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitExpr);
- // Emit the field.
- llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
- if (!C)
- return false;
+ void AppendPadding(uint64_t NumBytes);
- unsigned FieldAlignment = getAlignment(C);
+ void AppendTailPadding(uint64_t RecordSize);
- // Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
- llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+ void ConvertStructToPacked();
+
+ bool Build(InitListExpr *ILE);
- if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
- assert(!Packed && "Alignment is wrong even with a packed struct!");
+ unsigned getAlignment(const llvm::Constant *C) const {
+ if (Packed) return 1;
+ return CGM.getTargetData().getABITypeAlignment(C->getType());
+ }
- // Convert the struct to a packed struct.
- ConvertStructToPacked();
-
- AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
- }
+ uint64_t getSizeInBytes(const llvm::Constant *C) const {
+ return CGM.getTargetData().getTypeAllocSize(C->getType());
+ }
+};
- if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
- // We need to append padding.
- AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
+bool ConstStructBuilder::
+AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitCst) {
+ uint64_t FieldOffsetInBytes = FieldOffset / 8;
- assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
- "Did not add enough padding!");
+ assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
+ && "Field offset mismatch!");
- AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
- }
+ // Emit the field.
+ if (!InitCst)
+ return false;
- // Add the field.
- Elements.push_back(C);
- NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);
-
- if (Packed)
- assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
- else
- LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+ unsigned FieldAlignment = getAlignment(InitCst);
- return true;
- }
+ // Round up the field offset to the alignment of the field type.
+ uint64_t AlignedNextFieldOffsetInBytes =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
- bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
- const Expr *InitExpr) {
- llvm::ConstantInt *CI =
- cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
- Field->getType(),
- CGF));
- // FIXME: Can this ever happen?
- if (!CI)
- return false;
-
- if (FieldOffset > NextFieldOffsetInBytes * 8) {
- // We need to add padding.
- uint64_t NumBytes =
- llvm::RoundUpToAlignment(FieldOffset -
- NextFieldOffsetInBytes * 8, 8) / 8;
+ if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
- AppendPadding(NumBytes);
- }
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ }
- uint64_t FieldSize =
- Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
- llvm::APInt FieldValue = CI->getValue();
+ assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
+ "Did not add enough padding!");
- // Promote the size of FieldValue if necessary
- // FIXME: This should never occur, but currently it can because initializer
- // constants are cast to bool, and because clang is not enforcing bitfield
- // width limits.
- if (FieldSize > FieldValue.getBitWidth())
- FieldValue.zext(FieldSize);
+ AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ }
- // Truncate the size of FieldValue to the bit field size.
- if (FieldSize < FieldValue.getBitWidth())
- FieldValue.trunc(FieldSize);
+ // Add the field.
+ Elements.push_back(InitCst);
+ NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes +
+ getSizeInBytes(InitCst);
+
+ if (Packed)
+ assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
+ else
+ LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
- if (FieldOffset < NextFieldOffsetInBytes * 8) {
- // Either part of the field or the entire field can go into the previous
- // byte.
- assert(!Elements.empty() && "Elements can't be empty!");
+ return true;
+}
- unsigned BitsInPreviousByte =
- NextFieldOffsetInBytes * 8 - FieldOffset;
+bool ConstStructBuilder::
+ AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitCst) {
+ llvm::ConstantInt *CI = cast_or_null<llvm::ConstantInt>(InitCst);
+ // FIXME: Can this ever happen?
+ if (!CI)
+ return false;
- bool FitsCompletelyInPreviousByte =
- BitsInPreviousByte >= FieldValue.getBitWidth();
+ if (FieldOffset > NextFieldOffsetInBytes * 8) {
+ // We need to add padding.
+ uint64_t NumBytes =
+ llvm::RoundUpToAlignment(FieldOffset -
+ NextFieldOffsetInBytes * 8, 8) / 8;
- llvm::APInt Tmp = FieldValue;
+ AppendPadding(NumBytes);
+ }
- if (!FitsCompletelyInPreviousByte) {
- unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+ uint64_t FieldSize =
+ Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
- if (CGM.getTargetData().isBigEndian()) {
- Tmp = Tmp.lshr(NewFieldWidth);
- Tmp.trunc(BitsInPreviousByte);
+ llvm::APInt FieldValue = CI->getValue();
- // We want the remaining high bits.
- FieldValue.trunc(NewFieldWidth);
- } else {
- Tmp.trunc(BitsInPreviousByte);
+ // Promote the size of FieldValue if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (FieldSize > FieldValue.getBitWidth())
+ FieldValue.zext(FieldSize);
- // We want the remaining low bits.
- FieldValue = FieldValue.lshr(BitsInPreviousByte);
- FieldValue.trunc(NewFieldWidth);
- }
- }
+ // Truncate the size of FieldValue to the bit field size.
+ if (FieldSize < FieldValue.getBitWidth())
+ FieldValue.trunc(FieldSize);
- Tmp.zext(8);
- if (CGM.getTargetData().isBigEndian()) {
- if (FitsCompletelyInPreviousByte)
- Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
- } else {
- Tmp = Tmp.shl(8 - BitsInPreviousByte);
- }
+ if (FieldOffset < NextFieldOffsetInBytes * 8) {
+ // Either part of the field or the entire field can go into the previous
+ // byte.
+ assert(!Elements.empty() && "Elements can't be empty!");
- // Or in the bits that go into the previous byte.
- if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(Elements.back()))
- Tmp |= Val->getValue();
- else
- assert(isa<llvm::UndefValue>(Elements.back()));
+ unsigned BitsInPreviousByte =
+ NextFieldOffsetInBytes * 8 - FieldOffset;
- Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+ bool FitsCompletelyInPreviousByte =
+ BitsInPreviousByte >= FieldValue.getBitWidth();
- if (FitsCompletelyInPreviousByte)
- return true;
- }
+ llvm::APInt Tmp = FieldValue;
- while (FieldValue.getBitWidth() > 8) {
- llvm::APInt Tmp;
+ if (!FitsCompletelyInPreviousByte) {
+ unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
if (CGM.getTargetData().isBigEndian()) {
- // We want the high bits.
- Tmp = FieldValue;
- Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
- Tmp.trunc(8);
+ Tmp = Tmp.lshr(NewFieldWidth);
+ Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining high bits.
+ FieldValue.trunc(NewFieldWidth);
} else {
- // We want the low bits.
- Tmp = FieldValue;
- Tmp.trunc(8);
+ Tmp.trunc(BitsInPreviousByte);
- FieldValue = FieldValue.lshr(8);
+ // We want the remaining low bits.
+ FieldValue = FieldValue.lshr(BitsInPreviousByte);
+ FieldValue.trunc(NewFieldWidth);
}
-
- Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
- NextFieldOffsetInBytes++;
-
- FieldValue.trunc(FieldValue.getBitWidth() - 8);
}
- assert(FieldValue.getBitWidth() > 0 &&
- "Should have at least one bit left!");
- assert(FieldValue.getBitWidth() <= 8 &&
- "Should not have more than a byte left!");
+ Tmp.zext(8);
+ if (CGM.getTargetData().isBigEndian()) {
+ if (FitsCompletelyInPreviousByte)
+ Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
+ } else {
+ Tmp = Tmp.shl(8 - BitsInPreviousByte);
+ }
- if (FieldValue.getBitWidth() < 8) {
- if (CGM.getTargetData().isBigEndian()) {
- unsigned BitWidth = FieldValue.getBitWidth();
+ // Or in the bits that go into the previous byte.
+ if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(Elements.back()))
+ Tmp |= Val->getValue();
+ else
+ assert(isa<llvm::UndefValue>(Elements.back()));
- FieldValue.zext(8);
- FieldValue = FieldValue << (8 - BitWidth);
- } else
- FieldValue.zext(8);
- }
+ Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
- // Append the last element.
- Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
- FieldValue));
- NextFieldOffsetInBytes++;
- return true;
+ if (FitsCompletelyInPreviousByte)
+ return true;
}
- void AppendPadding(uint64_t NumBytes) {
- if (!NumBytes)
- return;
+ while (FieldValue.getBitWidth() > 8) {
+ llvm::APInt Tmp;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ // We want the high bits.
+ Tmp = FieldValue;
+ Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
+ Tmp.trunc(8);
+ } else {
+ // We want the low bits.
+ Tmp = FieldValue;
+ Tmp.trunc(8);
- const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
- if (NumBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumBytes);
+ FieldValue = FieldValue.lshr(8);
+ }
- llvm::Constant *C = llvm::UndefValue::get(Ty);
- Elements.push_back(C);
- assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
+ NextFieldOffsetInBytes++;
- NextFieldOffsetInBytes += getSizeInBytes(C);
+ FieldValue.trunc(FieldValue.getBitWidth() - 8);
}
- void AppendTailPadding(uint64_t RecordSize) {
- assert(RecordSize % 8 == 0 && "Invalid record size!");
+ assert(FieldValue.getBitWidth() > 0 &&
+ "Should have at least one bit left!");
+ assert(FieldValue.getBitWidth() <= 8 &&
+ "Should not have more than a byte left!");
- uint64_t RecordSizeInBytes = RecordSize / 8;
- assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+ if (FieldValue.getBitWidth() < 8) {
+ if (CGM.getTargetData().isBigEndian()) {
+ unsigned BitWidth = FieldValue.getBitWidth();
- unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
- AppendPadding(NumPadBytes);
+ FieldValue.zext(8);
+ FieldValue = FieldValue << (8 - BitWidth);
+ } else
+ FieldValue.zext(8);
}
- void ConvertStructToPacked() {
- std::vector<llvm::Constant *> PackedElements;
- uint64_t ElementOffsetInBytes = 0;
+ // Append the last element.
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
+ FieldValue));
+ NextFieldOffsetInBytes++;
+ return true;
+}
- for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
- llvm::Constant *C = Elements[i];
+void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
+ if (!NumBytes)
+ return;
- unsigned ElementAlign =
- CGM.getTargetData().getABITypeAlignment(C->getType());
- uint64_t AlignedElementOffsetInBytes =
- llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
- if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
- // We need some padding.
- uint64_t NumBytes =
- AlignedElementOffsetInBytes - ElementOffsetInBytes;
+ llvm::Constant *C = llvm::UndefValue::get(Ty);
+ Elements.push_back(C);
+ assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
- const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
- if (NumBytes > 1)
- Ty = llvm::ArrayType::get(Ty, NumBytes);
+ NextFieldOffsetInBytes += getSizeInBytes(C);
+}
- llvm::Constant *Padding = llvm::UndefValue::get(Ty);
- PackedElements.push_back(Padding);
- ElementOffsetInBytes += getSizeInBytes(Padding);
- }
+void ConstStructBuilder::AppendTailPadding(uint64_t RecordSize) {
+ assert(RecordSize % 8 == 0 && "Invalid record size!");
- PackedElements.push_back(C);
- ElementOffsetInBytes += getSizeInBytes(C);
- }
+ uint64_t RecordSizeInBytes = RecordSize / 8;
+ assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
- assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
- "Packing the struct changed its size!");
+ unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+ AppendPadding(NumPadBytes);
+}
- Elements = PackedElements;
- LLVMStructAlignment = 1;
- Packed = true;
- }
-
- bool Build(InitListExpr *ILE) {
- RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
-
- unsigned FieldNo = 0;
- unsigned ElementNo = 0;
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end();
- ElementNo < ILE->getNumInits() && Field != FieldEnd;
- ++Field, ++FieldNo) {
- if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
- continue;
-
- if (Field->isBitField()) {
- if (!Field->getIdentifier())
- continue;
-
- if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
- ILE->getInit(ElementNo)))
- return false;
- } else {
- if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
- ILE->getInit(ElementNo)))
- return false;
- }
+void ConstStructBuilder::ConvertStructToPacked() {
+ std::vector<llvm::Constant *> PackedElements;
+ uint64_t ElementOffsetInBytes = 0;
- ElementNo++;
- }
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ llvm::Constant *C = Elements[i];
- uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
+ unsigned ElementAlign =
+ CGM.getTargetData().getABITypeAlignment(C->getType());
+ uint64_t AlignedElementOffsetInBytes =
+ llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
- if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
- // If the struct is bigger than the size of the record type,
- // we must have a flexible array member at the end.
- assert(RD->hasFlexibleArrayMember() &&
- "Must have flexible array member if struct is bigger than type!");
-
- // No tail padding is necessary.
- return true;
- }
+ if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
+ // We need some padding.
+ uint64_t NumBytes =
+ AlignedElementOffsetInBytes - ElementOffsetInBytes;
- uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
- LLVMStructAlignment);
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
- // Check if we need to convert the struct to a packed struct.
- if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
- LLVMSizeInBytes > LayoutSizeInBytes) {
- assert(!Packed && "Size mismatch!");
-
- ConvertStructToPacked();
- assert(NextFieldOffsetInBytes == LayoutSizeInBytes &&
- "Converting to packed did not help!");
+ llvm::Constant *Padding = llvm::UndefValue::get(Ty);
+ PackedElements.push_back(Padding);
+ ElementOffsetInBytes += getSizeInBytes(Padding);
}
- // Append tail padding if necessary.
- AppendTailPadding(Layout.getSize());
-
- assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
- "Tail padding mismatch!");
-
- return true;
+ PackedElements.push_back(C);
+ ElementOffsetInBytes += getSizeInBytes(C);
}
- unsigned getAlignment(const llvm::Constant *C) const {
- if (Packed)
- return 1;
+ assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
+ "Packing the struct changed its size!");
- return CGM.getTargetData().getABITypeAlignment(C->getType());
+ Elements = PackedElements;
+ LLVMStructAlignment = 1;
+ Packed = true;
+}
+
+bool ConstStructBuilder::Build(InitListExpr *ILE) {
+ RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ unsigned FieldNo = 0;
+ unsigned ElementNo = 0;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields, they just affect layout.
+ if (Field->isBitField() && !Field->getIdentifier())
+ continue;
+
+    // Get the initializer. A struct can include fields without initializers;
+    // we just use explicit null values for them.
+ llvm::Constant *EltInit;
+ if (ElementNo < ILE->getNumInits())
+ EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
+ Field->getType(), CGF);
+ else
+ EltInit = CGM.EmitNullConstant(Field->getType());
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
+ return false;
+ } else {
+ // Otherwise we have a bitfield.
+ if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
+ return false;
+ }
}
- uint64_t getSizeInBytes(const llvm::Constant *C) const {
- return CGM.getTargetData().getTypeAllocSize(C->getType());
+ uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
+
+ if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
+ // If the struct is bigger than the size of the record type,
+ // we must have a flexible array member at the end.
+ assert(RD->hasFlexibleArrayMember() &&
+ "Must have flexible array member if struct is bigger than type!");
+
+ // No tail padding is necessary.
+ return true;
}
-public:
- static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
- InitListExpr *ILE) {
- ConstStructBuilder Builder(CGM, CGF);
+ uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
+ LLVMStructAlignment);
- if (!Builder.Build(ILE))
- return 0;
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
+ LLVMSizeInBytes > LayoutSizeInBytes) {
+ assert(!Packed && "Size mismatch!");
+
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInBytes <= LayoutSizeInBytes &&
+ "Converting to packed did not help!");
+ }
- llvm::Constant *Result =
- llvm::ConstantStruct::get(CGM.getLLVMContext(),
- Builder.Elements, Builder.Packed);
+ // Append tail padding if necessary.
+ AppendTailPadding(Layout.getSize());
- assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
- Builder.getAlignment(Result)) ==
- Builder.getSizeInBytes(Result) && "Size mismatch!");
+ assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
+ "Tail padding mismatch!");
- return Result;
- }
-};
+ return true;
+}
+
+llvm::Constant *ConstStructBuilder::
+ BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return 0;
+
+ llvm::Constant *Result =
+ llvm::ConstantStruct::get(CGM.getLLVMContext(),
+ Builder.Elements, Builder.Packed);
+
+ assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
+ Builder.getAlignment(Result)) ==
+ Builder.getSizeInBytes(Result) && "Size mismatch!");
+
+ return Result;
+}
+
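
A rough sketch of a constant initializer that exercises the builder above --
AppendField with padding, AppendBitField, the anonymous bit-field skip, and
null-filling of trailing members (assumed layout, not taken from this patch):

    struct S {
      char c;        // AppendField; padding precedes 'i'
      int  i;
      int  b1 : 3;   // AppendBitField; b2 lands in the same bytes
      int  b2 : 13;
      int     : 0;   // anonymous bit-field: skipped, layout-only
      long l;        // no initializer below: filled via EmitNullConstant
    };
    struct S s = { 'a', 1, 2, 3 };
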
+//===----------------------------------------------------------------------===//
+// ConstExprEmitter
+//===----------------------------------------------------------------------===//
+
class ConstExprEmitter :
public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
CodeGenModule &CGM;
@@ -418,13 +446,18 @@ public:
// Get the function pointer (or index if this is a virtual function).
if (MD->isVirtual()) {
- uint64_t Index = CGM.getVTables().getMethodVtableIndex(MD);
+ uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
+ // FIXME: We shouldn't use / 8 here.
+ uint64_t PointerWidthInBytes =
+ CGM.getContext().Target.getPointerWidth(0) / 8;
+
// Itanium C++ ABI 2.3:
// For a non-virtual function, this field is a simple function pointer.
// For a virtual function, it is 1 plus the virtual table offset
// (in bytes) of the function, represented as a ptrdiff_t.
- Values[0] = llvm::ConstantInt::get(PtrDiffTy, (Index * 8) + 1);
+ Values[0] = llvm::ConstantInt::get(PtrDiffTy,
+ (Index * PointerWidthInBytes) + 1);
} else {
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
const llvm::Type *Ty =
@@ -528,8 +561,6 @@ public:
const MemberPointerType *DestTy =
E->getType()->getAs<MemberPointerType>();
- const CXXRecordDecl *BaseClass =
- cast<CXXRecordDecl>(cast<RecordType>(SrcTy->getClass())->getDecl());
const CXXRecordDecl *DerivedClass =
cast<CXXRecordDecl>(cast<RecordType>(DestTy->getClass())->getDecl());
@@ -543,7 +574,7 @@ public:
// Check if we need to update the adjustment.
if (llvm::Constant *Offset =
- CGM.GetNonVirtualBaseClassOffset(DerivedClass, BaseClass)) {
+ CGM.GetNonVirtualBaseClassOffset(DerivedClass, E->getBasePath())) {
llvm::Constant *Values[2];
Values[0] = CS->getOperand(0);
@@ -587,17 +618,15 @@ public:
}
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
- std::vector<llvm::Constant*> Elts;
- const llvm::ArrayType *AType =
- cast<llvm::ArrayType>(ConvertType(ILE->getType()));
unsigned NumInitElements = ILE->getNumInits();
- // FIXME: Check for wide strings
- // FIXME: Check for NumInitElements exactly equal to 1??
- if (NumInitElements > 0 &&
+ if (NumInitElements == 1 &&
(isa<StringLiteral>(ILE->getInit(0)) ||
- isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
- ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
+ isa<ObjCEncodeExpr>(ILE->getInit(0))))
return Visit(ILE->getInit(0));
+
+ std::vector<llvm::Constant*> Elts;
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(ConvertType(ILE->getType()));
const llvm::Type *ElemTy = AType->getElementType();
unsigned NumElements = AType->getNumElements();
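
The rewritten guard above takes the single-initializer fast path only for a
lone string or @encode literal; a sketch of the two shapes it now separates
(illustrative):

    char a[8] = { "hi" };         // one StringLiteral: handled by Visit()
    char b[8] = { 'h', 'i', 0 };  // element-wise loop below
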
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index d1c0f8d..9849688 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -133,6 +133,7 @@ public:
CGF.getContext().typesAreCompatible(
E->getArgType1(), E->getArgType2()));
}
+ Value *VisitOffsetOfExpr(const OffsetOfExpr *E);
Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
@@ -242,7 +243,7 @@ public:
return Visit(E->getSubExpr());
}
Value *VisitUnaryOffsetOf(const UnaryOperator *E);
-
+
// C++
Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
@@ -314,6 +315,10 @@ public:
}
BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
+ Value *&BitFieldResult);
+
Value *EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
@@ -818,16 +823,12 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return Visit(const_cast<Expr*>(E));
case CastExpr::CK_BaseToDerived: {
- const CXXRecordDecl *BaseClassDecl =
- E->getType()->getCXXRecordDeclForPointerType();
const CXXRecordDecl *DerivedClassDecl =
DestTy->getCXXRecordDeclForPointerType();
- Value *Src = Visit(const_cast<Expr*>(E));
-
- bool NullCheckValue = ShouldNullCheckClassCastValue(CE);
- return CGF.GetAddressOfDerivedClass(Src, BaseClassDecl, DerivedClassDecl,
- NullCheckValue);
+ return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
+ CE->getBasePath(),
+ ShouldNullCheckClassCastValue(CE));
}
case CastExpr::CK_UncheckedDerivedToBase:
case CastExpr::CK_DerivedToBase: {
@@ -836,15 +837,9 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
CXXRecordDecl *DerivedClassDecl =
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
- const RecordType *BaseClassTy =
- DestTy->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
- CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl());
-
- Value *Src = Visit(const_cast<Expr*>(E));
-
- bool NullCheckValue = ShouldNullCheckClassCastValue(CE);
- return CGF.GetAddressOfBaseClass(Src, DerivedClassDecl, BaseClassDecl,
- NullCheckValue);
+ return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
+ CE->getBasePath(),
+ ShouldNullCheckClassCastValue(CE));
}
case CastExpr::CK_Dynamic: {
Value *V = Visit(const_cast<Expr*>(E));
@@ -894,7 +889,8 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
std::swap(DerivedDecl, BaseDecl);
if (llvm::Constant *Adj =
- CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, BaseDecl)) {
+ CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
+ CE->getBasePath())) {
if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
Src = Builder.CreateSub(Src, Adj, "adj");
else
@@ -1035,6 +1031,21 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
+Value *ScalarExprEmitter::VisitOffsetOfExpr(const OffsetOfExpr *E) {
+ Expr::EvalResult Result;
+  if (E->Evaluate(Result, CGF.getContext()))
+ return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+
+ // FIXME: Cannot support code generation for non-constant offsetof.
+ unsigned DiagID = CGF.CGM.getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile non-constant __builtin_offsetof");
+ CGF.CGM.getDiags().Report(CGF.getContext().getFullLoc(E->getLocStart()),
+ DiagID)
+ << E->getSourceRange();
+
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
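
A sketch of the two cases the new visitor distinguishes (type and names are
hypothetical):

    #include <cstddef>
    struct T { int a; int b[10]; };
    std::size_t ok = __builtin_offsetof(T, b[2]);  // Evaluate() folds this
    // With a runtime index, Evaluate() fails, the custom diagnostic above
    // fires, and the expression lowers to a null value of its type.
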
/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
@@ -1103,22 +1114,24 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
return Result;
}
-Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
- Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
- bool Ignore = TestAndClearIgnoreResultAssign();
+LValue ScalarExprEmitter::EmitCompoundAssignLValue(
+ const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
+ Value *&BitFieldResult) {
QualType LHSTy = E->getLHS()->getType();
-
+ BitFieldResult = 0;
BinOpInfo OpInfo;
-
+
if (E->getComputationResultType()->isAnyComplexType()) {
// This needs to go through the complex expression emitter, but it's a tad
// complicated to do that... I'm leaving it out for now. (Note that we do
// actually need the imaginary part of the RHS for multiplication and
// division.)
CGF.ErrorUnsupported(E, "complex compound assignment");
- return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ return LValue();
}
-
+
// Emit the RHS first. __block variables need to have the rhs evaluated
// first, plus this should improve codegen a little.
OpInfo.RHS = Visit(E->getRHS());
@@ -1129,13 +1142,13 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
E->getComputationLHSType());
-
+
// Expand the binary operator.
Value *Result = (this->*Func)(OpInfo);
-
+
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
-
+
// Store the result value into the LHS lvalue. Bit-fields are handled
// specially because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after the
@@ -1144,11 +1157,23 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
if (!LHSLV.isVolatileQualified()) {
CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
&Result);
- return Result;
+ BitFieldResult = Result;
+ return LHSLV;
} else
CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
} else
CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+ return LHSLV;
+}
+
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ Value *BitFieldResult;
+ LValue LHSLV = EmitCompoundAssignLValue(E, Func, BitFieldResult);
+ if (BitFieldResult)
+ return BitFieldResult;
+
if (Ignore)
return 0;
return EmitLoadOfLValue(LHSLV, E->getType());
@@ -1914,3 +1939,53 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
return LV;
}
+
+LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
+ const CompoundAssignOperator *E) {
+ ScalarExprEmitter Scalar(*this);
+ Value *BitFieldResult = 0;
+ switch (E->getOpcode()) {
+#define COMPOUND_OP(Op) \
+ case BinaryOperator::Op##Assign: \
+ return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
+ BitFieldResult)
+ COMPOUND_OP(Mul);
+ COMPOUND_OP(Div);
+ COMPOUND_OP(Rem);
+ COMPOUND_OP(Add);
+ COMPOUND_OP(Sub);
+ COMPOUND_OP(Shl);
+ COMPOUND_OP(Shr);
+ COMPOUND_OP(And);
+ COMPOUND_OP(Xor);
+ COMPOUND_OP(Or);
+#undef COMPOUND_OP
+
+ case BinaryOperator::PtrMemD:
+ case BinaryOperator::PtrMemI:
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::Rem:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ case BinaryOperator::And:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Or:
+ case BinaryOperator::LAnd:
+ case BinaryOperator::LOr:
+ case BinaryOperator::Assign:
+ case BinaryOperator::Comma:
+    assert(false && "Not a valid compound assignment operator");
+ break;
+ }
+
+ llvm_unreachable("Unhandled compound assignment operator");
+}
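
The lvalue-returning split exists because C++ compound assignments are
themselves lvalues; a minimal sketch of source that needs
EmitCompoundAssignOperatorLValue (illustrative):

    int x = 1;
    int *p = &(x += 2);  // the compound assignment is used as an lvalue
    (x *= 3) = 7;        // likewise; both route through the LValue path
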
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 206d438..3359250 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -53,31 +53,36 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
// arguments in generic code.
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
- const Expr *ReceiverExpr = E->getReceiver();
bool isSuperMessage = false;
bool isClassMessage = false;
+ ObjCInterfaceDecl *OID = 0;
// Find the receiver
- llvm::Value *Receiver;
- if (!ReceiverExpr) {
- const ObjCInterfaceDecl *OID = E->getClassInfo().Decl;
-
- // Very special case, super send in class method. The receiver is
- // self (the class object) and the send uses super semantics.
- if (!OID) {
- assert(E->getClassName()->isStr("super") &&
- "Unexpected missing class interface in message send.");
- isSuperMessage = true;
- Receiver = LoadObjCSelf();
- } else {
- Receiver = Runtime.GetClass(Builder, OID);
- }
-
+ llvm::Value *Receiver = 0;
+ switch (E->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class: {
+ const ObjCInterfaceType *IFace
+ = E->getClassReceiver()->getAs<ObjCInterfaceType>();
+    assert(IFace && "Invalid Objective-C class message send");
+    OID = IFace->getDecl();
+ Receiver = Runtime.GetClass(Builder, OID);
isClassMessage = true;
- } else if (isa<ObjCSuperExpr>(E->getReceiver())) {
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:
+ Receiver = LoadObjCSelf();
isSuperMessage = true;
+ break;
+
+ case ObjCMessageExpr::SuperClass:
Receiver = LoadObjCSelf();
- } else {
- Receiver = EmitScalarExpr(E->getReceiver());
+ isSuperMessage = true;
+ isClassMessage = true;
+ break;
}
CallArgList Args;
@@ -98,7 +103,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
}
return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
- Receiver, isClassMessage, Args,
+ Receiver, Args, OID,
E->getMethodDecl());
}
@@ -148,19 +153,21 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ bool IsAtomic =
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
// FIXME: This is rather murky, we create this here since they will not have
// been created by Sema for us.
OMD->createImplicitParams(getContext(), IMP->getClassInterface());
StartObjCMethod(OMD, IMP->getClassInterface());
-
+
// Determine if we should use an objc_getProperty call for
// this. Non-atomic properties are directly evaluated.
// atomic 'copy' and 'retain' properties are also directly
// evaluated in gc-only mode.
if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
- !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ IsAtomic &&
(PD->getSetterKind() == ObjCPropertyDecl::Copy ||
PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
llvm::Value *GetPropertyFn =
@@ -208,7 +215,44 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified());
}
else if (hasAggregateLLVMType(Ivar->getType())) {
- EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ bool IsStrong = false;
+ if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(Ivar->getType())))
+ && CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect
+ && CGM.getObjCRuntime().GetCopyStructFunction()) {
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetCopyStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList Args;
+ RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue,
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *isAtomic =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsAtomic ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(isAtomic),
+ getContext().BoolTy));
+ llvm::Value *hasStrong =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
+ IsStrong ? 1 : 0);
+ Args.push_back(std::make_pair(RValue::get(hasStrong),
+ getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
+ }
+ else
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
} else {
CodeGenTypes &Types = CGM.getTypes();
RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
@@ -289,6 +333,41 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
FunctionType::ExtInfo()),
SetPropertyFn,
ReturnValueSlot(), Args);
+ } else if (IsAtomic && hasAggregateLLVMType(Ivar->getType()) &&
+ !Ivar->getType()->isAnyComplexType() &&
+ IndirectObjCSetterArg(*CurFnInfo)
+ && CGM.getObjCRuntime().GetCopyStructFunction()) {
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
+ llvm::Value *GetCopyStructFn =
+ CGM.getObjCRuntime().GetCopyStructFunction();
+ CodeGenTypes &Types = CGM.getTypes();
+ CallArgList Args;
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
+ RValue RV = RValue::get(Builder.CreateBitCast(LV.getAddress(),
+ Types.ConvertType(getContext().VoidPtrTy)));
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsPtrTy =
+ Builder.CreateBitCast(Arg,
+ Types.ConvertType(getContext().VoidPtrTy));
+ RV = RValue::get(ArgAsPtrTy);
+ Args.push_back(std::make_pair(RV, getContext().VoidPtrTy));
+ // sizeof (Type of Ivar)
+ uint64_t Size = getContext().getTypeSize(Ivar->getType()) / 8;
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy), Size);
+ Args.push_back(std::make_pair(RValue::get(SizeVal),
+ getContext().LongTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ Args.push_back(std::make_pair(RValue::get(False), getContext().BoolTy));
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo()),
+ GetCopyStructFn, ReturnValueSlot(), Args);
} else {
// FIXME: Find a clean way to avoid AST node creation.
SourceLocation Loc = PD->getLocation();
@@ -304,7 +383,7 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
if (getContext().getCanonicalType(Ivar->getType()) !=
getContext().getCanonicalType(ArgDecl->getType())) {
ImplicitCastExpr ArgCasted(Ivar->getType(), CastExpr::CK_BitCast, &Arg,
- false);
+ CXXBaseSpecifierArray(), false);
BinaryOperator Assign(&IvarRef, &ArgCasted, BinaryOperator::Assign,
Ivar->getType(), Loc);
EmitStmt(&Assign);
@@ -318,6 +397,83 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
FinishFunction();
}
+void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD,
+ bool ctor) {
+ llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> IvarInitializers;
+ MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
+ StartObjCMethod(MD, IMP->getClassInterface());
+ for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
+ E = IMP->init_end(); B != E; ++B) {
+ CXXBaseOrMemberInitializer *Member = (*B);
+ IvarInitializers.push_back(Member);
+ }
+ if (ctor) {
+ for (unsigned I = 0, E = IvarInitializers.size(); I != E; ++I) {
+ CXXBaseOrMemberInitializer *IvarInit = IvarInitializers[I];
+ FieldDecl *Field = IvarInit->getMember();
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ EmitAggExpr(IvarInit->getInit(), LV.getAddress(),
+ LV.isVolatileQualified(), false, true);
+ }
+ // constructor returns 'self'.
+ CodeGenTypes &Types = CGM.getTypes();
+ QualType IdTy(CGM.getContext().getObjCIdType());
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
+ }
+ else {
+ // dtor
+ for (size_t i = IvarInitializers.size(); i > 0; --i) {
+ FieldDecl *Field = IvarInitializers[i - 1]->getMember();
+ QualType FieldType = Field->getType();
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(LV.getAddress(), BasePtr);
+ EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ Array, BaseAddrPtr);
+ }
+ else
+ EmitCXXDestructorCall(FieldClassDecl->getDestructor(CGM.getContext()),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ LV.getAddress());
+ }
+ }
+ FinishFunction();
+}
+
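
The ordering here mirrors C++ member semantics: ivar initializers run
forward over init_begin()/init_end(), destructors run in reverse. A C++
analogue of that ordering (illustrative only):

    #include <string>
    #include <vector>
    struct Analogue {
      std::string a;       // initialized first, destroyed last
      std::vector<int> b;  // initialized second, destroyed first
    };
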
+bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
+ it++; it++;
+ const ABIArgInfo &AI = it->info;
+ // FIXME. Is this sufficient check?
+ return (AI.getKind() == ABIArgInfo::Indirect);
+}
+
+bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return false;
+ if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
+ return FDTTy->getDecl()->hasObjectMember();
+ return false;
+}
+
llvm::Value *CodeGenFunction::LoadObjCSelf() {
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
// See if we need to lazily forward self inside a block literal.
@@ -360,7 +516,7 @@ RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
return CGM.getObjCRuntime().
GenerateMessageSend(*this, Exp->getType(), S,
EmitScalarExpr(E->getBase()),
- false, CallArgList());
+ CallArgList());
} else {
const ObjCImplicitSetterGetterRefExpr *KE =
cast<ObjCImplicitSetterGetterRefExpr>(Exp);
@@ -376,7 +532,7 @@ RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
return CGM.getObjCRuntime().
GenerateMessageSend(*this, Exp->getType(), S,
Receiver,
- KE->getInterfaceDecl() != 0, CallArgList());
+ CallArgList(), KE->getInterfaceDecl());
}
}
@@ -413,7 +569,7 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
Args.push_back(std::make_pair(Src, E->getType()));
CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
EmitScalarExpr(E->getBase()),
- false, Args);
+ Args);
} else if (const ObjCImplicitSetterGetterRefExpr *E =
dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
Selector S = E->getSetterMethod()->getSelector();
@@ -430,7 +586,7 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
Args.push_back(std::make_pair(Src, E->getType()));
CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
Receiver,
- E->getInterfaceDecl() != 0, Args);
+ Args, E->getInterfaceDecl());
} else
assert (0 && "bad expression node in EmitObjCPropertySet");
}
@@ -498,7 +654,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
CGM.getObjCRuntime().GenerateMessageSend(*this,
getContext().UnsignedLongTy,
FastEnumSel,
- Collection, false, Args);
+ Collection, Args);
llvm::Value *LimitPtr = CreateMemTemp(getContext().UnsignedLongTy,
"limit.ptr");
@@ -623,7 +779,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
CGM.getObjCRuntime().GenerateMessageSend(*this,
getContext().UnsignedLongTy,
FastEnumSel,
- Collection, false, Args);
+ Collection, Args);
Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
Limit = Builder.CreateLoad(LimitPtr);
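
The atomic-aggregate getter and setter added in this file both funnel into
one runtime helper; its C-level shape, matching the argument list built here
(the size is passed as LongTy, as in the Mac runtime's getCopyStructFn):

    extern "C" void objc_copyStruct(void *dest, const void *src,
                                    long size, bool atomic, bool hasStrong);
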
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index d445200..3c51b7e 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -26,6 +26,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
+#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Compiler.h"
@@ -81,6 +82,8 @@ private:
llvm::Constant *Zeros[2];
llvm::Constant *NULLPtr;
llvm::LLVMContext &VMContext;
+ /// Metadata kind used to tie method lookups to message sends.
+ unsigned msgSendMDKind;
private:
llvm::Constant *GenerateIvarList(
const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
@@ -112,7 +115,8 @@ private:
llvm::Constant *Methods,
llvm::Constant *Protocols,
llvm::Constant *IvarOffsets,
- llvm::Constant *Properties);
+ llvm::Constant *Properties,
+ bool isMeta=false);
llvm::Constant *GenerateProtocolMethodList(
const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes);
@@ -141,8 +145,8 @@ public:
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
- bool IsClassMessage,
const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method);
virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
@@ -170,6 +174,7 @@ public:
virtual llvm::Function *ModuleInitFunction();
virtual llvm::Function *GetPropertyGetFunction();
virtual llvm::Function *GetPropertySetFunction();
+ virtual llvm::Function *GetCopyStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
@@ -221,10 +226,6 @@ void CGObjCGNU::EmitClassRef(const std::string &className) {
llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
}
-static std::string SymbolNameForClass(const std::string &ClassName) {
- return "_OBJC_CLASS_" + ClassName;
-}
-
static std::string SymbolNameForMethod(const std::string &ClassName, const
std::string &CategoryName, const std::string &MethodName, bool isClassMethod)
{
@@ -238,6 +239,9 @@ static std::string SymbolNameForMethod(const std::string &ClassName, const
CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
: CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
MetaClassPtrAlias(0), VMContext(cgm.getLLVMContext()) {
+
+ msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
+
IntTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().IntTy));
LongTy = cast<llvm::IntegerType>(
@@ -452,12 +456,15 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
return RValue::get(0);
}
}
- llvm::Value *cmd = GetSelector(CGF.Builder, Sel);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *cmd = GetSelector(Builder, Sel);
+
CallArgList ActualArgs;
ActualArgs.push_back(
- std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
+ std::make_pair(RValue::get(Builder.CreateBitCast(Receiver, IdTy)),
ASTIdTy));
ActualArgs.push_back(std::make_pair(RValue::get(cmd),
CGF.getContext().getObjCSelType()));
@@ -481,7 +488,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
IdTy, Params, true), "objc_get_class");
}
- ReceiverClass = CGF.Builder.CreateCall(classLookupFunction,
+ ReceiverClass = Builder.CreateCall(classLookupFunction,
MakeConstantString(Class->getNameAsString()));
} else {
// Set up global aliases for the metaclass or class pointer if they do not
@@ -506,36 +513,64 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
}
}
// Cast the pointer to a simplified version of the class structure
- ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
+ ReceiverClass = Builder.CreateBitCast(ReceiverClass,
llvm::PointerType::getUnqual(
llvm::StructType::get(VMContext, IdTy, IdTy, NULL)));
// Get the superclass pointer
- ReceiverClass = CGF.Builder.CreateStructGEP(ReceiverClass, 1);
+ ReceiverClass = Builder.CreateStructGEP(ReceiverClass, 1);
// Load the superclass pointer
- ReceiverClass = CGF.Builder.CreateLoad(ReceiverClass);
+ ReceiverClass = Builder.CreateLoad(ReceiverClass);
// Construct the structure used to look up the IMP
llvm::StructType *ObjCSuperTy = llvm::StructType::get(VMContext,
Receiver->getType(), IdTy, NULL);
- llvm::Value *ObjCSuper = CGF.Builder.CreateAlloca(ObjCSuperTy);
+ llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
- CGF.Builder.CreateStore(Receiver, CGF.Builder.CreateStructGEP(ObjCSuper, 0));
- CGF.Builder.CreateStore(ReceiverClass,
- CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+ Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
+ Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
// Get the IMP
std::vector<const llvm::Type*> Params;
Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
Params.push_back(SelectorTy);
+
+ llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+ llvm::Value *imp;
+
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ // The lookup function returns a slot, which can be safely cached.
+ llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
+ IntTy, llvm::PointerType::getUnqual(impType), NULL);
+
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(SlotTy), Params, true),
+ "objc_slot_lookup_super");
+
+ llvm::CallInst *slot = Builder.CreateCall(lookupFunction, lookupArgs,
+ lookupArgs+2);
+ slot->setOnlyReadsMemory();
+
+ imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+ } else {
llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
"objc_msg_lookup_super");
+ imp = Builder.CreateCall(lookupFunction, lookupArgs, lookupArgs+2);
+ }
- llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
- llvm::Value *imp = CGF.Builder.CreateCall(lookupFunction, lookupArgs,
- lookupArgs+2);
-
- return CGF.EmitCall(FnInfo, imp, ReturnValueSlot(), ActualArgs);
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsClassMessage)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(FnInfo, imp, ReturnValueSlot(), ActualArgs,
+ 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+ return msgRet;
}
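
Under the non-fragile GNU ABI, the super lookup now returns a pointer to a
cacheable slot and the IMP is loaded from field index 4. A C++ view of the
slot layout the CreateStructGEP above assumes (field names are illustrative):

    struct objc_slot {
      void *owner;
      void *cachedFor;
      void *types;
      int   version;
      void (*method)();  // loaded via CreateStructGEP(slot, 4)
    };
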
/// Generate code for a message send expression.
@@ -544,9 +579,10 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
- bool IsClassMessage,
const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method) {
+ // Strip out message sends to retain / release in GC mode
if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
return RValue::get(Receiver);
@@ -555,7 +591,39 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
return RValue::get(0);
}
}
+
CGBuilderTy &Builder = CGF.Builder;
+
+ // If the return type is something that goes in an integer register, the
+ // runtime will handle 0 returns. For other cases, we fill in the 0 value
+ // ourselves.
+ //
+ // The language spec says the result of this kind of message send is
+ // undefined, but lots of people seem to have forgotten to read that
+ // paragraph and insist on sending messages to nil that have structure
+ // returns. With GCC, this generates a random return value (whatever happens
+ // to be on the stack / in those registers at the time) on most platforms,
+ // and generates a SegV on SPARC. With LLVM it corrupts the stack.
+ bool isPointerSizedReturn = false;
+ if (ResultType->isAnyPointerType() || ResultType->isIntegralType() ||
+ ResultType->isVoidType())
+ isPointerSizedReturn = true;
+
+ llvm::BasicBlock *startBB = 0;
+ llvm::BasicBlock *messageBB = 0;
+  llvm::BasicBlock *continueBB = 0;
+
+ if (!isPointerSizedReturn) {
+ startBB = Builder.GetInsertBlock();
+ messageBB = CGF.createBasicBlock("msgSend");
+    continueBB = CGF.createBasicBlock("continue");
+
+ llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
+ llvm::Constant::getNullValue(Receiver->getType()));
+    Builder.CreateCondBr(isNil, continueBB, messageBB);
+ CGF.EmitBlock(messageBB);
+ }
+
IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
llvm::Value *cmd;
if (Method)
@@ -577,6 +645,14 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
const llvm::FunctionType *impType =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+    llvm::MDString::get(VMContext, Class ? Class->getNameAsString() : ""),
+    llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), Class != 0)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
+
+
llvm::Value *imp;
// For sender-aware dispatch, we pass the sender as the third argument to a
// lookup function. When sending messages from C code, the sender is nil.
@@ -611,9 +687,13 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
LookupFn->setDoesNotCapture(1);
}
- llvm::Value *slot =
+ llvm::CallInst *slot =
Builder.CreateCall3(lookupFunction, ReceiverPtr, cmd, self);
+ slot->setOnlyReadsMemory();
+ slot->setMetadata(msgSendMDKind, node);
+
imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+
// The lookup function may have changed the receiver, so make sure we use
// the new one.
ActualArgs[0] =
@@ -628,9 +708,46 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
"objc_msg_lookup");
imp = Builder.CreateCall2(lookupFunction, Receiver, cmd);
+ cast<llvm::CallInst>(imp)->setMetadata(msgSendMDKind, node);
}
-
- return CGF.EmitCall(FnInfo, imp, ReturnValueSlot(), ActualArgs);
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(FnInfo, imp, ReturnValueSlot(), ActualArgs,
+ 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+
+ if (!isPointerSizedReturn) {
+    CGF.EmitBlock(continueBB);
+ if (msgRet.isScalar()) {
+ llvm::Value *v = msgRet.getScalarVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType());
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
+ msgRet = RValue::get(phi);
+ } else if (msgRet.isAggregate()) {
+ llvm::Value *v = msgRet.getAggregateAddr();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType());
+ const llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
+ llvm::AllocaInst *NullVal =
+ CGF.CreateTempAlloca(RetTy->getElementType(), "null");
+ CGF.InitTempAlloca(NullVal,
+ llvm::Constant::getNullValue(RetTy->getElementType()));
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(NullVal, startBB);
+ msgRet = RValue::getAggregate(phi);
+ } else /* isComplex() */ {
+ std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v.first->getType());
+ phi->addIncoming(v.first, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()),
+ startBB);
+ llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType());
+ phi2->addIncoming(v.second, messageBB);
+ phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()),
+ startBB);
+ msgRet = RValue::getComplex(phi, phi2);
+ }
+ }
+ return msgRet;
}
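
Semantically, the new guard makes a message send to nil with a non-register
return produce a zeroed value instead of stack garbage; a sketch of the
equivalent behavior (hypothetical helper names):

    struct Big { int v[4]; };
    Big realSend(void *receiver);  // stand-in for the looked-up IMP
    Big send(void *receiver) {
      if (!receiver) { Big zero = {}; return zero; }  // the phi's null path
      return realSend(receiver);                      // the msgSend path
    }
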
/// Generates a MethodList. Used in construction of a objc_class and
@@ -749,7 +866,8 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
llvm::Constant *Methods,
llvm::Constant *Protocols,
llvm::Constant *IvarOffsets,
- llvm::Constant *Properties) {
+ llvm::Constant *Properties,
+ bool isMeta) {
// Set up the class structure
// Note: Several of these are char*s when they should be ids. This is
// because the runtime performs this translation on load.
@@ -799,8 +917,8 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// Create an instance of the structure
// This is now an externally visible symbol, so that we can speed up class
// messages in the next ABI.
- return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name),
- llvm::GlobalValue::ExternalLinkage);
+ return MakeGlobal(ClassTy, Elements, (isMeta ? "_OBJC_METACLASS_":
+ "_OBJC_CLASS_") + std::string(Name), llvm::GlobalValue::ExternalLinkage);
}
llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
@@ -1289,16 +1407,21 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
instanceSize = 0 - (instanceSize - superInstanceSize);
}
- for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
- endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+
+ // Collect declared and synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ CGM.getContext().ShallowCollectObjCIvars(ClassDecl, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
// Store the name
- IvarNames.push_back(MakeConstantString((*iter)->getNameAsString()));
+ IvarNames.push_back(MakeConstantString(IVD->getNameAsString()));
// Get the type encoding for this ivar
std::string TypeStr;
- Context.getObjCEncodingForType((*iter)->getType(), TypeStr);
+ Context.getObjCEncodingForType(IVD->getType(), TypeStr);
IvarTypes.push_back(MakeConstantString(TypeStr));
// Get the offset
- uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter);
+ uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
uint64_t Offset = BaseOffset;
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
Offset = BaseOffset - superInstanceSize;
@@ -1309,7 +1432,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
false, llvm::GlobalValue::ExternalLinkage,
llvm::ConstantInt::get(IntTy, BaseOffset),
"__objc_ivar_offset_value_" + ClassName +"." +
- (*iter)->getNameAsString()));
+ IVD->getNameAsString()));
}
llvm::Constant *IvarOffsetArrayInit =
llvm::ConstantArray::get(llvm::ArrayType::get(PtrToIntTy,
@@ -1411,7 +1534,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
//Generate metaclass for class methods
llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
NULLPtr, 0x12L, ClassName.c_str(), 0, Zeros[0], GenerateIvarList(
- empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr, NULLPtr);
+ empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr, NULLPtr, true);
// Generate the class structure
llvm::Constant *ClassStruct =
@@ -1658,10 +1781,9 @@ llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Params.push_back(IdTy);
Params.push_back(SelectorTy);
- // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
- Params.push_back(LongTy);
+ Params.push_back(IntTy);
Params.push_back(BoolTy);
- // void objc_getProperty (id, SEL, ptrdiff_t, bool)
+ // void objc_getProperty (id, SEL, int, bool)
const llvm::FunctionType *FTy =
llvm::FunctionType::get(IdTy, Params, false);
return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
@@ -1674,18 +1796,22 @@ llvm::Function *CGObjCGNU::GetPropertySetFunction() {
CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
Params.push_back(IdTy);
Params.push_back(SelectorTy);
- // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
- Params.push_back(LongTy);
+ Params.push_back(IntTy);
Params.push_back(IdTy);
Params.push_back(BoolTy);
Params.push_back(BoolTy);
- // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ // void objc_setProperty (id, SEL, int, id, bool, bool)
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
"objc_setProperty"));
}
+// FIXME: Implement this.
+llvm::Function *CGObjCGNU::GetCopyStructFunction() {
+ return 0;
+}
+
llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -1764,7 +1890,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
llvm::SmallVector<llvm::Value*, 8> ESelArgs;
- llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers;
+ llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
ESelArgs.push_back(Exc);
ESelArgs.push_back(Personality);
@@ -1772,12 +1898,13 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
bool HasCatchAll = false;
// Only @try blocks are allowed @catch blocks, but both can have @finally
if (isTry) {
- if (const ObjCAtCatchStmt* CatchStmt =
- cast<ObjCAtTryStmt>(S).getCatchStmts()) {
+ if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
CGF.setInvokeDest(CatchInCatch);
- for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
- const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+ for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
Handlers.push_back(std::make_pair(CatchDecl,
CatchStmt->getCatchBody()));
@@ -1816,7 +1943,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
ESelArgs.begin(), ESelArgs.end(), "selector");
for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- const ParmVarDecl *CatchParam = Handlers[i].first;
+ const VarDecl *CatchParam = Handlers[i].first;
const Stmt *CatchBody = Handlers[i].second;
llvm::BasicBlock *Next = 0;
@@ -2046,7 +2173,14 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
// when linked against code which isn't (most of the time).
llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
if (!IvarOffsetPointer) {
- uint64_t Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+ uint64_t Offset;
+ if (ObjCImplementationDecl *OID =
+ CGM.getContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl *>(ID)))
+ Offset = ComputeIvarBaseOffset(CGM, OID, Ivar);
+ else
+ Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+
llvm::ConstantInt *OffsetGuess =
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset, "ivar");
// Don't emit the guess in non-PIC code because the linker will not be able
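The non-fragile-ABI branch in GenerateClass above emits each ivar offset relative to the end of the superclass instance (Offset = BaseOffset - superInstanceSize), so the runtime can rebase the value when the superclass grows. A minimal standalone sketch of that arithmetic, with invented sizes:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical numbers for illustration only.
      uint64_t superInstanceSize = 16; // superclass instance size, in bytes
      uint64_t BaseOffset = 24;        // absolute offset of the ivar
      // The value emitted into __objc_ivar_offset_value_<Class>.<ivar>:
      uint64_t Offset = BaseOffset - superInstanceSize;
      std::printf("relative ivar offset: %llu\n",
                  (unsigned long long)Offset); // prints 8
      return 0;
    }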
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index ac8fa05..77eabbf 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -124,9 +124,19 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
// layout object. However, this is blocked on other cleanups to the
// Objective-C code, so for now we just live with allocating a bunch of these
// objects.
- unsigned FieldNo = 0; // This value is unused.
+
+ // We always construct a single, possibly unaligned, access for this case.
+ CGBitFieldInfo::AccessInfo AI;
+ AI.FieldIndex = 0;
+ AI.FieldByteOffset = 0;
+ AI.FieldBitStart = BitOffset;
+ AI.AccessWidth = CGF.CGM.getContext().getTypeSize(IvarTy);
+ AI.AccessAlignment = 0;
+ AI.TargetBitOffset = 0;
+ AI.TargetBitWidth = BitFieldSize;
+
CGBitFieldInfo *Info =
- new (CGF.CGM.getContext()) CGBitFieldInfo(FieldNo, BitOffset, BitFieldSize,
+ new (CGF.CGM.getContext()) CGBitFieldInfo(BitFieldSize, 1, &AI,
IvarTy->isSignedIntegerType());
// FIXME: We need to set a very conservative alignment on this, or make sure
@@ -329,6 +339,24 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
+
+ llvm::Constant *getCopyStructFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_copyStruct (void *, const void *, size_t, bool, bool)
+ llvm::SmallVector<CanQualType,5> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo()),
+ false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
+ }
+
llvm::Constant *getEnumerationMutationFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -954,6 +982,10 @@ protected:
const ObjCMethodDecl *OMD,
const ObjCCommonTypesHelper &ObjCTypes);
+ /// EmitImageInfo - Emit the image info marker used to encode some module
+ /// level information.
+ void EmitImageInfo();
+
public:
CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
CGM(cgm), VMContext(cgm.getLLVMContext()) { }
@@ -980,9 +1012,6 @@ public:
class CGObjCMac : public CGObjCCommonMac {
private:
ObjCTypesHelper ObjCTypes;
- /// EmitImageInfo - Emit the image info marker used to encode some module
- /// level information.
- void EmitImageInfo();
/// EmitModuleInfo - Another marker encoding module level
/// information.
@@ -1100,8 +1129,8 @@ public:
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
- bool IsClassMessage,
const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method);
virtual CodeGen::RValue
@@ -1134,6 +1163,7 @@ public:
virtual llvm::Constant *GetPropertyGetFunction();
virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetCopyStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
@@ -1327,8 +1357,8 @@ public:
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
- bool IsClassMessage,
const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method);
virtual CodeGen::RValue
@@ -1366,6 +1396,11 @@ public:
virtual llvm::Constant *GetPropertySetFunction() {
return ObjCTypes.getSetPropertyFn();
}
+
+ virtual llvm::Constant *GetCopyStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+
virtual llvm::Constant *EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
}
@@ -1459,9 +1494,20 @@ llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
};
*/
+/// Or generate a constant NSString object.
+/*
+ struct __builtin_NSString {
+ const int *isa; // point to __NSConstantStringClassReference
+ const char *str;
+ unsigned int length;
+ };
+*/
+
llvm::Constant *CGObjCCommonMac::GenerateConstantString(
const StringLiteral *SL) {
- return CGM.GetAddrOfConstantCFString(SL);
+ return (CGM.getLangOptions().NoConstantCFStrings == 0 ?
+ CGM.GetAddrOfConstantCFString(SL) :
+ CGM.GetAddrOfConstantNSString(SL));
}
/// Generates a message send where the super is the receiver. This is
@@ -1531,8 +1577,8 @@ CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
- bool IsClassMessage,
const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method) {
return EmitLegacyMessageSend(CGF, ResultType,
EmitSelector(CGF.Builder, Sel),
@@ -1696,7 +1742,6 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
Init,
"\01L_OBJC_PROTOCOL_" + PD->getName());
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
- Entry->setAlignment(4);
// FIXME: Is this necessary? Why only for protocol?
Entry->setAlignment(4);
}
@@ -1718,7 +1763,6 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
0,
"\01L_OBJC_PROTOCOL_" + PD->getName());
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
- Entry->setAlignment(4);
// FIXME: Is this necessary? Why only for protocol?
Entry->setAlignment(4);
}
@@ -2039,6 +2083,8 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
Interface->protocol_begin(),
Interface->protocol_end());
unsigned Flags = eClassFlags_Factory;
+ if (ID->getNumIvarInitializers())
+ Flags |= eClassFlags_HasCXXStructors;
unsigned Size =
CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;
@@ -2430,6 +2476,10 @@ llvm::Constant *CGObjCMac::GetPropertySetFunction() {
return ObjCTypes.getSetPropertyFn();
}
+llvm::Constant *CGObjCMac::GetCopyStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+
llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
}
@@ -2597,8 +2647,9 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
CallTryExitPtr);
CGF.EmitBranchThroughCleanup(FinallyRethrow);
- } else if (const ObjCAtCatchStmt* CatchStmt =
- cast<ObjCAtTryStmt>(S).getCatchStmts()) {
+ } else if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
+
// Enter a new exception try block (in case a @catch block throws
// an exception).
CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
@@ -2617,10 +2668,11 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// matched and avoid generating code for falling off the end if
// so.
bool AllMatched = false;
- for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+ for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch");
- const ParmVarDecl *CatchParam = CatchStmt->getCatchParamDecl();
+ const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
const ObjCObjectPointerType *OPT = 0;
// catch(...) always matches.
@@ -2911,18 +2963,17 @@ llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
/// unsigned flags;
/// };
enum ImageInfoFlags {
- eImageInfo_FixAndContinue = (1 << 0), // FIXME: Not sure what
- // this implies.
+ eImageInfo_FixAndContinue = (1 << 0),
eImageInfo_GarbageCollected = (1 << 1),
eImageInfo_GCOnly = (1 << 2),
eImageInfo_OptimizedByDyld = (1 << 3), // FIXME: When is this set?
- // A flag indicating that the module has no instances of an
- // @synthesize of a superclass variable. <rdar://problem/6803242>
+ // A flag indicating that the module has no instances of a @synthesize of a
+ // superclass variable. <rdar://problem/6803242>
eImageInfo_CorrectedSynthesize = (1 << 4)
};
-void CGObjCMac::EmitImageInfo() {
+void CGObjCCommonMac::EmitImageInfo() {
unsigned version = 0; // Version is unused?
unsigned flags = 0;
@@ -3132,19 +3183,10 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
FieldDecl *Field = RecFields[i];
uint64_t FieldOffset;
if (RD) {
- const CGRecordLayout &RL =
- CGM.getTypes().getCGRecordLayout(Field->getParent());
- if (Field->isBitField()) {
- const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
-
- const llvm::Type *Ty =
- CGM.getTypes().ConvertTypeForMemRecursive(Field->getType());
- uint64_t TypeSize =
- CGM.getTypes().getTargetData().getTypeAllocSize(Ty);
- FieldOffset = Info.FieldNo * TypeSize;
- } else
- FieldOffset =
- Layout->getElementOffset(RL.getLLVMFieldNo(Field));
+ // Note that 'i' here is actually the field index inside RD of Field,
+ // although this dependency is hidden.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ FieldOffset = RL.getFieldOffset(i) / 8;
} else
FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
@@ -3536,7 +3578,7 @@ void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
<< '[' << CD->getName();
if (const ObjCCategoryImplDecl *CID =
dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
- OS << '(' << CID->getNameAsString() << ')';
+ OS << '(' << *CID << ')';
OS << ' ' << D->getSelector().getAsString() << ']';
}
@@ -3574,14 +3616,14 @@ void CGObjCMac::FinishModule() {
Asm += '\n';
llvm::raw_svector_ostream OS(Asm);
- for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
- e = LazySymbols.end(); I != e; ++I)
- OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
e = DefinedSymbols.end(); I != e; ++I)
OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
<< "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
-
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
+ e = LazySymbols.end(); I != e; ++I)
+ OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+
CGM.getModule().setModuleInlineAsm(OS.str());
}
}
@@ -3627,7 +3669,8 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// id self;
// Class cls;
// }
- RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct, 0,
+ RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct,
+ Ctx.getTranslationUnitDecl(),
SourceLocation(),
&Ctx.Idents.get("_objc_super"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
@@ -4088,7 +4131,8 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// };
// First the clang type for struct _message_ref_t
- RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct, 0,
+ RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct,
+ Ctx.getTranslationUnitDecl(),
SourceLocation(),
&Ctx.Idents.get("_message_ref_t"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
@@ -4162,7 +4206,7 @@ void CGObjCNonFragileABIMac::AddModuleClassList(const
llvm::GlobalValue::InternalLinkage,
Init,
SymbolName);
- GV->setAlignment(8);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
GV->setSection(SectionName);
CGM.AddUsedGlobal(GV);
}
@@ -4203,28 +4247,7 @@ void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
"\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
"__DATA, __objc_nlcatlist, regular, no_dead_strip");
- // static int L_OBJC_IMAGE_INFO[2] = { 0, flags };
- // FIXME. flags can be 0 | 1 | 2 | 6. For now just use 0
- std::vector<llvm::Constant*> Values(2);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, 0);
- unsigned int flags = 0;
- // FIXME: Fix and continue?
- if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
- flags |= eImageInfo_GarbageCollected;
- if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
- flags |= eImageInfo_GCOnly;
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
- llvm::Constant* Init = llvm::ConstantArray::get(
- llvm::ArrayType::get(ObjCTypes.IntTy, 2),
- Values);
- llvm::GlobalVariable *IMGV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::InternalLinkage,
- Init,
- "\01L_OBJC_IMAGE_INFO");
- IMGV->setSection("__DATA, __objc_imageinfo, regular, no_dead_strip");
- IMGV->setConstant(true);
- CGM.AddUsedGlobal(IMGV);
+ EmitImageInfo();
}
/// LegacyDispatchedSelector - Returns true if SEL is not in the list of
@@ -4233,9 +4256,19 @@ void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
/// message dispatch call for all the rest.
///
bool CGObjCNonFragileABIMac::LegacyDispatchedSelector(Selector Sel) {
- if (CGM.getCodeGenOpts().ObjCLegacyDispatch)
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ default:
+ assert(0 && "Invalid dispatch method!");
+ case CodeGenOptions::Legacy:
return true;
+ case CodeGenOptions::NonLegacy:
+ return false;
+ case CodeGenOptions::Mixed:
+ break;
+ }
+ // Otherwise (mixed dispatch), see whether this selector is in the white-list
+ // of things which must use the new dispatch convention. We lazily build a
+ // dense set for this.
if (NonLegacyDispatchMethods.empty()) {
NonLegacyDispatchMethods.insert(GetNullarySelector("alloc"));
NonLegacyDispatchMethods.insert(GetNullarySelector("class"));
@@ -4265,6 +4298,7 @@ bool CGObjCNonFragileABIMac::LegacyDispatchedSelector(Selector Sel) {
NonLegacyDispatchMethods.insert(
CGM.getContext().Selectors.getSelector(3, KeyIdents));
}
+
return (NonLegacyDispatchMethods.count(Sel) == 0);
}
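With the new tri-state option, the mixed mode above reduces to a set lookup. A hedged sketch of the policy using stand-in types (std::string instead of Selector, and an abbreviated white-list):

    #include <set>
    #include <string>

    enum class ObjCDispatchMethod { Legacy, NonLegacy, Mixed };

    // Returns true if Sel should use the legacy objc_msgSend-style dispatch.
    bool useLegacyDispatch(ObjCDispatchMethod Mode, const std::string &Sel) {
      switch (Mode) {
      case ObjCDispatchMethod::Legacy:    return true;
      case ObjCDispatchMethod::NonLegacy: return false;
      case ObjCDispatchMethod::Mixed:     break;
      }
      // Selectors that must use the new dispatch convention (abbreviated
      // from the list built in LegacyDispatchedSelector).
      static const std::set<std::string> NonLegacy = {
          "alloc", "class", "self", "isKindOfClass:", "respondsToSelector:"};
      return NonLegacy.count(Sel) == 0;
    }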
@@ -4369,7 +4403,7 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
CLASS_RO_GV->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ClassRonfABITy));
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
CLASS_RO_GV->setSection("__DATA, __objc_const");
return CLASS_RO_GV;
@@ -4405,7 +4439,7 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
GV->setInitializer(Init);
GV->setSection("__DATA, __objc_data");
GV->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ClassnfABITy));
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassnfABITy));
if (HiddenVisibility)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
return GV;
@@ -4467,6 +4501,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
+ if (ID->getNumIvarInitializers())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
if (!ID->getClassInterface()->getSuperClass()) {
// class is root
flags |= CLS_ROOT;
@@ -4501,6 +4537,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
flags = CLS;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
+ if (ID->getNumIvarInitializers())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
flags |= CLS_EXCEPTION;
@@ -4655,7 +4693,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Init,
ExtCatName);
GCATV->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.CategorynfABITy));
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.CategorynfABITy));
GCATV->setSection("__DATA, __objc_const");
CGM.AddUsedGlobal(GCATV);
DefinedCategories.push_back(GCATV);
@@ -4715,7 +4753,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
Init,
Name);
GV->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
GV->setSection(Section);
CGM.AddUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV,
@@ -4750,7 +4788,7 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
Offset));
IvarOffsetGV->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.LongTy));
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.LongTy));
// FIXME: This matches gcc, but shouldn't the visibility be set on the use as
// well (i.e., in ObjCIvarOffsetVariable).
@@ -4837,7 +4875,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
Init,
Prefix + OID->getName());
GV->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
GV->setSection("__DATA, __objc_const");
CGM.AddUsedGlobal(GV);
@@ -4956,7 +4994,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
false, llvm::GlobalValue::WeakAnyLinkage, Init,
"\01l_OBJC_PROTOCOL_$_" + PD->getName());
Entry->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ProtocolnfABITy));
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
Entry->setSection("__DATA,__datacoal_nt,coalesced");
}
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
@@ -4969,7 +5007,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
false, llvm::GlobalValue::WeakAnyLinkage, Entry,
"\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getName());
PTGV->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.AddUsedGlobal(PTGV);
@@ -5025,7 +5063,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
Name);
GV->setSection("__DATA, __objc_const");
GV->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
CGM.AddUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV,
ObjCTypes.ProtocolListnfABIPtrTy);
@@ -5184,8 +5222,8 @@ CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
- bool IsClassMessage,
const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method) {
return LegacyDispatchedSelector(Sel)
? EmitLegacyMessageSend(CGF, ResultType, EmitSelector(CGF.Builder, Sel),
@@ -5222,7 +5260,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
ClassGV,
"\01L_OBJC_CLASSLIST_REFERENCES_$_");
Entry->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(
+ CGM.getTargetData().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
@@ -5245,7 +5283,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
ClassGV,
"\01L_OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(
+ CGM.getTargetData().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
@@ -5271,7 +5309,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
MetaClassGV,
"\01L_OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
- CGM.getTargetData().getPrefTypeAlignment(
+ CGM.getTargetData().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
@@ -5532,45 +5570,44 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr());
// Construct the lists of (type, catch body) to handle.
- llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers;
+ llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
bool HasCatchAll = false;
if (isTry) {
- if (const ObjCAtCatchStmt* CatchStmt =
- cast<ObjCAtTryStmt>(S).getCatchStmts()) {
- for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
- const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
-
- // catch(...) always matches.
- if (!CatchDecl) {
- // Use i8* null here to signal this is a catch all, not a cleanup.
- llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
- SelectorArgs.push_back(Null);
- HasCatchAll = true;
- break;
- }
+ const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
+ for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+ Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
- if (CatchDecl->getType()->isObjCIdType() ||
- CatchDecl->getType()->isObjCQualifiedIdType()) {
- llvm::Value *IDEHType =
- CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
- if (!IDEHType)
- IDEHType =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
- false,
- llvm::GlobalValue::ExternalLinkage,
- 0, "OBJC_EHTYPE_id");
- SelectorArgs.push_back(IDEHType);
- } else {
- // All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *PT =
- CatchDecl->getType()->getAs<ObjCObjectPointerType>();
- assert(PT && "Invalid @catch type.");
- const ObjCInterfaceType *IT = PT->getInterfaceType();
- assert(IT && "Invalid @catch type.");
- llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
- SelectorArgs.push_back(EHType);
- }
+ // catch(...) always matches.
+ if (!CatchDecl) {
+ // Use i8* null here to signal this is a catch all, not a cleanup.
+ llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ SelectorArgs.push_back(Null);
+ HasCatchAll = true;
+ break;
+ }
+
+ if (CatchDecl->getType()->isObjCIdType() ||
+ CatchDecl->getType()->isObjCQualifiedIdType()) {
+ llvm::Value *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "OBJC_EHTYPE_id");
+ SelectorArgs.push_back(IDEHType);
+ } else {
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *PT =
+ CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
+ SelectorArgs.push_back(EHType);
}
}
}
@@ -5589,7 +5626,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
SelectorArgs.begin(), SelectorArgs.end(),
"selector");
for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- const ParmVarDecl *CatchParam = Handlers[i].first;
+ const VarDecl *CatchParam = Handlers[i].first;
const Stmt *CatchBody = Handlers[i].second;
llvm::BasicBlock *Next = 0;
@@ -5612,14 +5649,9 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
if (CatchBody) {
llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end");
- llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
// Cleanups must call objc_end_catch.
- //
- // FIXME: It seems incorrect for objc_begin_catch to be inside this
- // context, but this matches gcc.
CGF.PushCleanupBlock(MatchEnd);
- CGF.setInvokeDest(MatchHandler);
llvm::Value *ExcObject =
CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc);
@@ -5636,25 +5668,37 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
}
+ // Exceptions inside the catch block must be rethrown. We set a special
+ // purpose invoke destination for this which just collects the thrown
+ // exception, overwrites the object in RethrowPtr, and branches through the
+ // match.end block (to make sure objc_end_catch is called) before branching
+ // to the rethrow handler.
+ llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
+ CGF.setInvokeDest(MatchHandler);
CGF.ObjCEHValueStack.push_back(ExcObject);
CGF.EmitStmt(CatchBody);
CGF.ObjCEHValueStack.pop_back();
+ CGF.setInvokeDest(0);
CGF.EmitBranchThroughCleanup(FinallyEnd);
- CGF.EmitBlock(MatchHandler);
-
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- llvm::SmallVector<llvm::Value*, 8> Args;
- Args.push_back(Exc);
- Args.push_back(ObjCTypes.getEHPersonalityPtr());
- Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- 0));
- CGF.Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end());
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ // Don't emit the extra match handler if there were no unprotected calls in
+ // the catch block.
+ if (MatchHandler->use_empty()) {
+ delete MatchHandler;
+ } else {
+ CGF.EmitBlock(MatchHandler);
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ CGF.Builder.CreateCall3(llvm_eh_selector,
+ Exc, ObjCTypes.getEHPersonalityPtr(),
+ llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 0),
+ "unused_eh_selector");
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
@@ -5666,8 +5710,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.createBasicBlock("match.end.handler");
llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(),
- Cont, MatchEndHandler,
- Args.begin(), Args.begin());
+ Cont, MatchEndHandler);
CGF.EmitBlock(Cont);
if (Info.SwitchBlock)
@@ -5676,15 +5719,14 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.EmitBlock(Info.EndBlock);
CGF.EmitBlock(MatchEndHandler);
- Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
// We are required to emit this call to satisfy LLVM, even
// though we don't use the result.
- Args.clear();
- Args.push_back(Exc);
- Args.push_back(ObjCTypes.getEHPersonalityPtr());
- Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- 0));
- CGF.Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end());
+ CGF.Builder.CreateCall3(llvm_eh_selector,
+ Exc, ObjCTypes.getEHPersonalityPtr(),
+ llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 0),
+ "unused_eh_selector");
CGF.Builder.CreateStore(Exc, RethrowPtr);
CGF.EmitBranchThroughCleanup(FinallyRethrow);
@@ -5723,9 +5765,19 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Branch around the rethrow code.
CGF.EmitBranch(FinallyEnd);
+ // Generate the rethrow code, taking care to use an invoke if we are in a
+ // nested exception scope.
CGF.EmitBlock(FinallyRethrow);
- CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
- CGF.Builder.CreateLoad(RethrowPtr));
+ if (PrevLandingPad) {
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(ObjCTypes.getUnwindResumeOrRethrowFn(),
+ Cont, PrevLandingPad,
+ CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.EmitBlock(Cont);
+ } else {
+ CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr));
+ }
CGF.Builder.CreateUnreachable();
CGF.EmitBlock(FinallyEnd);
@@ -5816,7 +5868,8 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
if (CGM.getLangOptions().getVisibilityMode() == LangOptions::Hidden)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
- Entry->setAlignment(8);
+ Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.EHTypeTy));
if (ForDefinition) {
Entry->setSection("__DATA,__objc_const");
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index e478394..654ad0a 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -122,8 +122,8 @@ public:
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
- bool IsClassMessage,
const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class = 0,
const ObjCMethodDecl *Method = 0) = 0;
/// Generate an Objective-C message send operation to the super
@@ -167,6 +167,9 @@ public:
/// Return the runtime function for setting properties.
virtual llvm::Constant *GetPropertySetFunction() = 0;
+ // API for atomic copying of qualified aggregates in setter/getter.
+ virtual llvm::Constant *GetCopyStructFunction() = 0;
+
/// GetClass - Return a reference to the class for the given
/// interface decl.
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
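The GetCopyStructFunction hook added above returns the helper whose C signature getCopyStructFn() documents in CGObjCMac.cpp. A hedged sketch of how a synthesized atomic struct-property getter would use it (the thunk and its parameter names are hypothetical):

    #include <cstddef>

    // Mirrors the comment in getCopyStructFn():
    //   void objc_copyStruct(void *, const void *, size_t, bool, bool)
    extern "C" void objc_copyStruct(void *dest, const void *src,
                                    std::size_t size, bool atomic,
                                    bool hasStrong);

    // What a synthesized atomic getter for a struct property boils down to.
    void getterThunk(void *result, const void *ivar, std::size_t size) {
      objc_copyStruct(result, ivar, size, /*atomic=*/true,
                      /*hasStrong=*/false);
    }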
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 1caec97..aec1c45 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -31,8 +31,8 @@ class RTTIBuilder {
/// descriptor of the given type.
llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
- /// BuildVtablePointer - Build the vtable pointer for the given type.
- void BuildVtablePointer(const Type *Ty);
+ /// BuildVTablePointer - Build the vtable pointer for the given type.
+ void BuildVTablePointer(const Type *Ty);
/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
@@ -148,6 +148,9 @@ public:
};
/// BuildTypeInfo - Build the RTTI type info struct for the given type.
+ ///
+ /// \param Force - true to force the creation of this RTTI value
llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
};
}
@@ -242,9 +245,10 @@ static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
}
/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
-/// the given type exists somewhere else, and that we should not emit the typ
+/// the given type exists somewhere else, in which case we need not emit the type
/// information in this translation unit.
-bool ShouldUseExternalRTTIDescriptor(QualType Ty) {
+static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
+ QualType Ty) {
// Type info for builtin types is defined in the standard library.
if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
return TypeInfoIsInStandardLibrary(BuiltinTy);
@@ -254,6 +258,9 @@ bool ShouldUseExternalRTTIDescriptor(QualType Ty) {
if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
return TypeInfoIsInStandardLibrary(PointerTy);
+ // If RTTI is disabled, don't consider key functions.
+ if (!Context.getLangOptions().RTTI) return false;
+
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
if (!RD->hasDefinition())
@@ -337,7 +344,7 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
if (RD->isDynamicClass())
- return CodeGenModule::getVtableLinkage(RD);
+ return CodeGenModule::getVTableLinkage(RD);
}
return llvm::GlobalValue::WeakODRLinkage;
@@ -375,8 +382,8 @@ static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
return true;
}
-void RTTIBuilder::BuildVtablePointer(const Type *Ty) {
- const char *VtableName;
+void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
+ const char *VTableName;
switch (Ty->getTypeClass()) {
default: assert(0 && "Unhandled type!");
@@ -386,24 +393,24 @@ void RTTIBuilder::BuildVtablePointer(const Type *Ty) {
case Type::Vector:
case Type::ExtVector:
// abi::__fundamental_type_info.
- VtableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
break;
case Type::ConstantArray:
case Type::IncompleteArray:
// abi::__array_type_info.
- VtableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
break;
case Type::FunctionNoProto:
case Type::FunctionProto:
// abi::__function_type_info.
- VtableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
break;
case Type::Enum:
// abi::__enum_type_info.
- VtableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
break;
case Type::Record: {
@@ -412,13 +419,13 @@ void RTTIBuilder::BuildVtablePointer(const Type *Ty) {
if (!RD->hasDefinition() || !RD->getNumBases()) {
// abi::__class_type_info.
- VtableName = "_ZTVN10__cxxabiv117__class_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv117__class_type_infoE";
} else if (CanUseSingleInheritance(RD)) {
// abi::__si_class_type_info.
- VtableName = "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv120__si_class_type_infoE";
} else {
// abi::__vmi_class_type_info.
- VtableName = "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
}
break;
@@ -426,30 +433,31 @@ void RTTIBuilder::BuildVtablePointer(const Type *Ty) {
case Type::Pointer:
// abi::__pointer_type_info.
- VtableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
break;
case Type::MemberPointer:
// abi::__pointer_to_member_type_info.
- VtableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+ VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
break;
}
- llvm::Constant *Vtable =
- CGM.getModule().getOrInsertGlobal(VtableName, Int8PtrTy);
+ llvm::Constant *VTable =
+ CGM.getModule().getOrInsertGlobal(VTableName, Int8PtrTy);
const llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
// The vtable address point is 2.
llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
- Vtable = llvm::ConstantExpr::getInBoundsGetElementPtr(Vtable, &Two, 1);
- Vtable = llvm::ConstantExpr::getBitCast(Vtable, Int8PtrTy);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, &Two, 1);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, Int8PtrTy);
- Fields.push_back(Vtable);
+ Fields.push_back(VTable);
}
-llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
+llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty,
+ bool Force) {
// We want to operate on the canonical type.
Ty = CGM.getContext().getCanonicalType(Ty);
@@ -463,13 +471,13 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
// Check if there is already an external RTTI descriptor for this type.
- if (!Force && ShouldUseExternalRTTIDescriptor(Ty))
+ if (!Force && ShouldUseExternalRTTIDescriptor(CGM.getContext(), Ty))
return GetAddrOfExternalRTTIDescriptor(Ty);
llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(Ty);
// Add the vtable pointer.
- BuildVtablePointer(cast<Type>(Ty));
+ BuildVTablePointer(cast<Type>(Ty));
// And the name.
Fields.push_back(BuildName(Ty, DecideHidden(Ty), Linkage));
@@ -782,42 +790,17 @@ void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(QualType(ClassType, 0)));
}
-llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty) {
- if (!getContext().getLangOptions().RTTI) {
+llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
+ bool ForEH) {
+ // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+ // FIXME: should we even be calling this method if RTTI is disabled
+ // and it's not for EH?
+ if (!ForEH && !getContext().getLangOptions().RTTI) {
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
return llvm::Constant::getNullValue(Int8PtrTy);
}
-
- return RTTIBuilder(*this).BuildTypeInfo(Ty);
-}
-// Try to find the magic class __cxxabiv1::__fundamental_type_info. If
-// exists and has a destructor, we will emit the typeinfo for the fundamental
-// types. This is the same behaviour as GCC.
-static CXXRecordDecl *FindMagicClass(ASTContext &AC) {
- const IdentifierInfo &NamespaceII = AC.Idents.get("__cxxabiv1");
- DeclarationName NamespaceDN = AC.DeclarationNames.getIdentifier(&NamespaceII);
- TranslationUnitDecl *TUD = AC.getTranslationUnitDecl();
- DeclContext::lookup_result NamespaceLookup = TUD->lookup(NamespaceDN);
- if (NamespaceLookup.first == NamespaceLookup.second)
- return NULL;
- const NamespaceDecl *Namespace =
- dyn_cast<NamespaceDecl>(*NamespaceLookup.first);
- if (!Namespace)
- return NULL;
-
- const IdentifierInfo &ClassII = AC.Idents.get("__fundamental_type_info");
- DeclarationName ClassDN = AC.DeclarationNames.getIdentifier(&ClassII);
- DeclContext::lookup_const_result ClassLookup = Namespace->lookup(ClassDN);
- if (ClassLookup.first == ClassLookup.second)
- return NULL;
- CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(*ClassLookup.first);
-
- if (Class->hasDefinition() && Class->isDynamicClass() &&
- Class->getDestructor(AC))
- return Class;
-
- return NULL;
+ return RTTIBuilder(*this).BuildTypeInfo(Ty);
}
void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
@@ -829,12 +812,6 @@ void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
}
void CodeGenModule::EmitFundamentalRTTIDescriptors() {
- CXXRecordDecl *RD = FindMagicClass(getContext());
- if (!RD)
- return;
-
- getVTables().GenerateClassData(getVtableLinkage(RD), RD);
-
QualType FundamentalTypes[] = { Context.VoidTy, Context.Char32Ty,
Context.Char16Ty, Context.UnsignedLongLongTy,
Context.LongLongTy, Context.WCharTy,
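The "vtable address point is 2" comment in BuildVTablePointer reflects the Itanium layout: an RTTI class vtable begins with the offset-to-top and type_info slots, so stepping two pointers past the symbol lands on the first virtual slot, which is what a type_info vptr must point at. A toy illustration (the array contents are fake):

    #include <cstdio>

    int main() {
      // { offset-to-top, type_info*, first virtual slot, ... }
      const void *FakeVTable[4] = {nullptr, nullptr,
                                   (const void *)0x1, (const void *)0x2};
      const void **AddressPoint = FakeVTable + 2; // the inbounds GEP by Two
      std::printf("%p\n", AddressPoint[0]);       // first virtual slot
      return 0;
    }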
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index fd1775e..9f966fb 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -13,22 +13,137 @@
#include "llvm/ADT/DenseMap.h"
#include "clang/AST/Decl.h"
namespace llvm {
+ class raw_ostream;
class Type;
}
namespace clang {
namespace CodeGen {
+/// \brief Helper object for describing how to generate the code for access to a
+/// bit-field.
+///
+/// This structure is intended to describe the "policy" of how the bit-field
+/// should be accessed, which may be target, language, or ABI dependent.
class CGBitFieldInfo {
public:
- CGBitFieldInfo(unsigned FieldNo, unsigned Start, unsigned Size,
- bool IsSigned)
- : FieldNo(FieldNo), Start(Start), Size(Size), IsSigned(IsSigned) {}
+ /// Descriptor for a single component of a bit-field access. The entire
+ /// bit-field is constituted of a bitwise OR of all of the individual
+ /// components.
+ ///
+ /// Each component describes an accessed value, which is how the component
+ /// should be transferred to/from memory, and a target placement, which is how
+ /// that component fits into the constituted bit-field. The pseudo-IR for a
+ /// load is:
+ ///
+ /// %0 = gep %base, 0, FieldIndex
+ /// %1 = gep (i8*) %0, FieldByteOffset
+ /// %2 = (i(AccessWidth) *) %1
+ /// %3 = load %2, align AccessAlignment
+ /// %4 = shr %3, FieldBitStart
+ ///
+ /// and the composed bit-field is formed as the boolean OR of all accesses,
+ /// masked to TargetBitWidth bits and shifted to TargetBitOffset.
+ struct AccessInfo {
+ /// Offset of the field to load in the LLVM structure, if any.
+ unsigned FieldIndex;
+
+ /// Byte offset from the field address, if any. This should generally be
+ /// unused as the cleanest IR comes from having a well-constructed LLVM type
+ /// with proper GEP instructions, but sometimes its use is required, for
+ /// example if an access is intended to straddle an LLVM field boundary.
+ unsigned FieldByteOffset;
+
+ /// Bit offset in the accessed value to use. The width is implied by \see
+ /// TargetBitWidth.
+ unsigned FieldBitStart;
+
+ /// Bit width of the memory access to perform.
+ unsigned AccessWidth;
+
+ /// The alignment of the memory access, or 0 if the default alignment should
+ /// be used.
+ //
+ // FIXME: Remove use of 0 to encode default, instead have IRgen do the right
+ // thing when it generates the code, if avoiding align directives is
+ // desired.
+ unsigned AccessAlignment;
+
+ /// Offset for the target value.
+ unsigned TargetBitOffset;
+
+ /// Number of bits in the access that are destined for the bit-field.
+ unsigned TargetBitWidth;
+ };
- unsigned FieldNo;
- unsigned Start;
+private:
+ /// The components to use to access the bit-field. We may need up to three
+ /// separate components to support up to i64 bit-field access (4 + 2 + 1 byte
+ /// accesses).
+ //
+ // FIXME: De-hardcode this, just allocate following the struct.
+ AccessInfo Components[3];
+
+ /// The total size of the bit-field, in bits.
unsigned Size;
+
+ /// The number of access components to use.
+ unsigned NumComponents;
+
+ /// Whether the bit-field is signed.
bool IsSigned : 1;
+
+public:
+ CGBitFieldInfo(unsigned Size, unsigned NumComponents, AccessInfo *_Components,
+ bool IsSigned) : Size(Size), NumComponents(NumComponents),
+ IsSigned(IsSigned) {
+ assert(NumComponents <= 3 && "invalid number of components!");
+ for (unsigned i = 0; i != NumComponents; ++i)
+ Components[i] = _Components[i];
+
+ // Check some invariants.
+ unsigned AccessedSize = 0;
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ AccessedSize += AI.TargetBitWidth;
+
+ // We shouldn't try to load 0 bits.
+ assert(AI.TargetBitWidth > 0);
+
+ // We can't load more bits than we accessed.
+ assert(AI.FieldBitStart + AI.TargetBitWidth <= AI.AccessWidth);
+
+ // We shouldn't put any bits outside the result size.
+ assert(AI.TargetBitWidth + AI.TargetBitOffset <= Size);
+ }
+
+ // Check that the total number of target bits matches the total bit-field
+ // size.
+ assert(AccessedSize == Size && "Total size does not match accessed size!");
+ }
+
+public:
+ /// \brief Check whether this bit-field is signed (i.e., should be sign
+ /// extended on loads).
+ bool isSigned() const { return IsSigned; }
+
+ /// \brief Get the size of the bit-field, in bits.
+ unsigned getSize() const { return Size; }
+
+ /// @name Component Access
+ /// @{
+
+ unsigned getNumComponents() const { return NumComponents; }
+
+ const AccessInfo &getComponent(unsigned Index) const {
+ assert(Index < getNumComponents() && "Invalid access!");
+ return Components[Index];
+ }
+
+ /// @}
+
+ void print(llvm::raw_ostream &OS) const;
+ void dump() const;
};
/// CGRecordLayout - This class handles struct and union layout info while
@@ -71,15 +186,15 @@ public:
return ContainsPointerToDataMember;
}
- /// \brief Return the BitFieldInfo that corresponds to the field FD.
+ /// \brief Return llvm::StructType element number that corresponds to the
+ /// field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const {
assert(!FD->isBitField() && "Invalid call for bit-field decl!");
assert(FieldInfo.count(FD) && "Invalid field for record!");
return FieldInfo.lookup(FD);
}
- /// \brief Return llvm::StructType element number that corresponds to the
- /// field FD.
+ /// \brief Return the BitFieldInfo that corresponds to the field FD.
const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
assert(FD->isBitField() && "Invalid call for non bit-field decl!");
llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
@@ -87,6 +202,9 @@ public:
assert(it != BitFields.end() && "Unable to find bitfield info");
return it->second;
}
+
+ void print(llvm::raw_ostream &OS) const;
+ void dump() const;
};
} // end namespace CodeGen
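The AccessInfo comment above fully specifies the load recipe; the following standalone sketch composes a bit-field value from its components exactly as described. It is hypothetical (not clang code) and assumes little-endian memory, per the big-endian FIXME in CGRecordLayoutBuilder.cpp:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct AccessInfo {
      unsigned FieldByteOffset;  // byte offset of the access
      unsigned FieldBitStart;    // low bits to discard after the load
      unsigned AccessWidth;      // width of the load, in bits
      unsigned TargetBitOffset;  // where the bits land in the result
      unsigned TargetBitWidth;   // how many bits are kept
    };

    uint64_t loadBitField(const unsigned char *Base,
                          const std::vector<AccessInfo> &Components) {
      uint64_t Result = 0;
      for (const AccessInfo &AI : Components) {
        uint64_t V = 0;
        std::memcpy(&V, Base + AI.FieldByteOffset, AI.AccessWidth / 8);
        V >>= AI.FieldBitStart;                        // shr by FieldBitStart
        V &= ~uint64_t(0) >> (64 - AI.TargetBitWidth); // mask to width
        Result |= V << AI.TargetBitOffset;             // OR into the result
      }
      return Result;
    }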
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 4b9ec66..6302cf8 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -18,8 +18,10 @@
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
-#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -67,6 +69,11 @@ private:
/// NextFieldOffsetInBytes - Holds the next field offset in bytes.
uint64_t NextFieldOffsetInBytes;
+ /// LayoutUnionField - Will layout a field in a union and return the type
+ /// that the field will have.
+ const llvm::Type *LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout);
+
/// LayoutUnion - Will layout a union RecordDecl.
void LayoutUnion(const RecordDecl *D);
@@ -87,10 +94,6 @@ private:
/// AppendField - Appends a field with the given offset and type.
void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
- /// AppendPadding - Appends enough padding bytes so that the total struct
- /// size matches the alignment of the passed in type.
- void AppendPadding(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
-
/// AppendPadding - Appends enough padding bytes so that the total
/// struct size is a multiple of the field alignment.
void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
@@ -103,7 +106,6 @@ private:
void AppendTailPadding(uint64_t RecordSize);
unsigned getTypeAlignment(const llvm::Type *Ty) const;
- uint64_t getTypeSizeInBytes(const llvm::Type *Ty) const;
/// CheckForPointerToDataMember - Check if the given type contains a pointer
/// to data member.
@@ -145,6 +147,104 @@ void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
LayoutFields(D);
}
+static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
+ uint64_t ContainingTypeSizeInBits = RL.getSize();
+ unsigned ContainingTypeAlign = RL.getAlignment();
+
+ const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
+ uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
+ uint64_t TypeSizeInBits = TypeSizeInBytes * 8;
+
+ bool IsSigned = FD->getType()->isSignedIntegerType();
+
+ if (FieldSize > TypeSizeInBits) {
+ // We have a wide bit-field. The extra bits are only used for padding, so
+ // if we have a bitfield of type T, with size N:
+ //
+ // T t : N;
+ //
+ // We can just assume that it's:
+ //
+ // T t : sizeof(T);
+ //
+ FieldSize = TypeSizeInBits;
+ }
+
+ // Compute the access components. The policy we use is to start by attempting
+ // to access using the width of the bit-field type itself and to always access
+ // at aligned indices of that type. If such an access would fail because it
+ // extends past the bound of the type, then we reduce the size to the next
+ // smaller
+ // power of two and retry. The current algorithm assumes pow2 sized types,
+ // although this is easy to fix.
+ //
+ // FIXME: This algorithm is wrong on big-endian systems, I think.
+ assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
+ CGBitFieldInfo::AccessInfo Components[3];
+ unsigned NumComponents = 0;
+ unsigned AccessedTargetBits = 0; // The number of target bits accessed.
+ unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
+
+ // Round down from the field offset to find the first access position that is
+ // at an aligned offset of the initial access type.
+ uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+
+ // Adjust initial access size to fit within record.
+ while (AccessWidth > 8 &&
+ AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ AccessWidth >>= 1;
+ AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+ }
+
+ while (AccessedTargetBits < FieldSize) {
+ // Check that we can access using a type of this size, without reading off
+ // the end of the structure. This can occur with packed structures and
+ // -fno-bitfield-type-align, for example.
+ if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ // If so, reduce access size to the next smaller power-of-two and retry.
+ AccessWidth >>= 1;
+ assert(AccessWidth >= 8 && "Cannot access under byte size!");
+ continue;
+ }
+
+ // Otherwise, add an access component.
+
+ // First, compute the bits inside this access which are part of the
+ // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
+ // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
+ // in the target that we are reading.
+ assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
+ assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
+ uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
+ uint64_t AccessBitsInFieldSize =
+ std::min(AccessWidth + AccessStart,
+ FieldOffset + FieldSize) - AccessBitsInFieldStart;
+
+ assert(NumComponents < 3 && "Unexpected number of components!");
+ CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
+ AI.FieldIndex = 0;
+ // FIXME: We still follow the old access pattern of only using the field
+ // byte offset. We should switch this once we fix the struct layout to be
+ // pretty.
+ AI.FieldByteOffset = AccessStart / 8;
+ AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
+ AI.AccessWidth = AccessWidth;
+ AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
+ AI.TargetBitOffset = AccessedTargetBits;
+ AI.TargetBitWidth = AccessBitsInFieldSize;
+
+ AccessStart += AccessWidth;
+ AccessedTargetBits += AI.TargetBitWidth;
+ }
+
+ assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
+ return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
+}
+
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
uint64_t FieldOffset) {
uint64_t FieldSize =
@@ -175,14 +275,9 @@ void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
assert(NumBytesToAppend && "No bytes to append!");
}
- const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
- uint64_t TypeSizeInBits = getTypeSizeInBytes(Ty) * 8;
-
- bool IsSigned = D->getType()->isSignedIntegerType();
- LLVMBitFields.push_back(LLVMBitFieldInfo(
- D, CGBitFieldInfo(FieldOffset / TypeSizeInBits,
- FieldOffset % TypeSizeInBits,
- FieldSize, IsSigned)));
+ // Add the bit field info.
+ LLVMBitFields.push_back(
+ LLVMBitFieldInfo(D, ComputeBitFieldInfo(Types, D, FieldOffset, FieldSize)));
AppendBytes(NumBytesToAppend);
@@ -254,6 +349,35 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
return true;
}
+const llvm::Type *
+CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout) {
+ if (Field->isBitField()) {
+ uint64_t FieldSize =
+ Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+
+ // Ignore zero sized bit fields.
+ if (FieldSize == 0)
+ return 0;
+
+ const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ unsigned NumBytesToAppend =
+ llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+
+ if (NumBytesToAppend > 1)
+ FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
+
+ // Add the bit field info.
+ LLVMBitFields.push_back(
+ LLVMBitFieldInfo(Field, ComputeBitFieldInfo(Types, Field, 0, FieldSize)));
+ return FieldTy;
+ }
+
+ // This is a regular union field.
+ LLVMFields.push_back(LLVMFieldInfo(Field, 0));
+ return Types.ConvertTypeForMemRecursive(Field->getType());
+}
+
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
@@ -270,28 +394,13 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
assert(Layout.getFieldOffset(FieldNo) == 0 &&
"Union field offset did not start at the beginning of record!");
+ const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);
- if (Field->isBitField()) {
- uint64_t FieldSize =
- Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
-
- // Ignore zero sized bit fields.
- if (FieldSize == 0)
- continue;
-
- // Add the bit field info.
- bool IsSigned = Field->getType()->isSignedIntegerType();
- LLVMBitFields.push_back(LLVMBitFieldInfo(
- *Field, CGBitFieldInfo(0, 0, FieldSize,
- IsSigned)));
- } else {
- LLVMFields.push_back(LLVMFieldInfo(*Field, 0));
- }
+ if (!FieldTy)
+ continue;
HasOnlyZeroSizedBitFields = false;
- const llvm::Type *FieldTy =
- Types.ConvertTypeForMemRecursive(Field->getType());
unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);
@@ -334,7 +443,7 @@ void CGRecordLayoutBuilder::LayoutBases(const CXXRecordDecl *RD,
llvm::Type::getInt8PtrTy(Types.getLLVMContext());
assert(NextFieldOffsetInBytes == 0 &&
- "Vtable pointer must come first!");
+ "VTable pointer must come first!");
AppendField(NextFieldOffsetInBytes, Int8PtrTy->getPointerTo());
}
}
@@ -388,7 +497,7 @@ void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
getTypeAlignment(FieldTy));
- uint64_t FieldSizeInBytes = getTypeSizeInBytes(FieldTy);
+ uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);
FieldTypes.push_back(FieldTy);
@@ -396,12 +505,6 @@ void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
BitsAvailableInLastField = 0;
}
-void
-CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
- const llvm::Type *FieldTy) {
- AppendPadding(FieldOffsetInBytes, getTypeAlignment(FieldTy));
-}
-
void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
unsigned FieldAlignment) {
assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
@@ -439,10 +542,6 @@ unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
return Types.getTargetData().getABITypeAlignment(Ty);
}
-uint64_t CGRecordLayoutBuilder::getTypeSizeInBytes(const llvm::Type *Ty) const {
- return Types.getTargetData().getTypeAllocSize(Ty);
-}
-
void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
// This record already contains a member pointer.
if (ContainsPointerToDataMember)
@@ -481,9 +580,6 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
Builder.FieldTypes,
Builder.Packed);
- assert(getContext().getASTRecordLayout(D).getSize() / 8 ==
- getTargetData().getTypeAllocSize(Ty) &&
- "Type size mismatch!");
CGRecordLayout *RL =
new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);
@@ -496,5 +592,121 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
for (unsigned i = 0, e = Builder.LLVMBitFields.size(); i != e; ++i)
RL->BitFields.insert(Builder.LLVMBitFields[i]);
+ // Dump the layout, if requested.
+ if (getContext().getLangOptions().DumpRecordLayouts) {
+ llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
+ llvm::errs() << "Record: ";
+ D->dump();
+ llvm::errs() << "\nLayout: ";
+ RL->dump();
+ }
+
+#ifndef NDEBUG
+ // Verify that the computed LLVM struct size matches the AST layout size.
+ uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
+ assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
+ "Type size mismatch!");
+
+ // Verify that the LLVM and AST field offsets agree.
+ const llvm::StructType *ST =
+ dyn_cast<llvm::StructType>(RL->getLLVMType());
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
+
+ const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
+ RecordDecl::field_iterator it = D->field_begin();
+ for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
+ const FieldDecl *FD = *it;
+
+ // For non-bit-fields, just check that the LLVM struct offset matches the
+ // AST offset.
+ if (!FD->isBitField()) {
+ unsigned FieldNo = RL->getLLVMFieldNo(FD);
+ assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
+ "Invalid field offset!");
+ continue;
+ }
+
+ // Ignore unnamed bit-fields.
+ if (!FD->getDeclName())
+ continue;
+
+ const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Verify that every component access is within the structure.
+ uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
+ uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
+ assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
+ "Invalid bit-field access (out of range)!");
+ }
+ }
+#endif
+
return RL;
}
+
+void CGRecordLayout::print(llvm::raw_ostream &OS) const {
+ OS << "<CGRecordLayout\n";
+ OS << " LLVMType:" << *LLVMType << "\n";
+ OS << " ContainsPointerToDataMember:" << ContainsPointerToDataMember << "\n";
+ OS << " BitFields:[\n";
+
+ // Print bit-field infos in declaration order.
+ std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
+ for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
+ it = BitFields.begin(), ie = BitFields.end();
+ it != ie; ++it) {
+ const RecordDecl *RD = it->first->getParent();
+ unsigned Index = 0;
+ for (RecordDecl::field_iterator
+ it2 = RD->field_begin(); *it2 != it->first; ++it2)
+ ++Index;
+ BFIs.push_back(std::make_pair(Index, &it->second));
+ }
+ llvm::array_pod_sort(BFIs.begin(), BFIs.end());
+ for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
+ OS.indent(4);
+ BFIs[i].second->print(OS);
+ OS << "\n";
+ }
+
+ OS << "]>\n";
+}
+
+void CGRecordLayout::dump() const {
+ print(llvm::errs());
+}
+
+void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
+ OS << "<CGBitFieldInfo";
+ OS << " Size:" << Size;
+ OS << " IsSigned:" << IsSigned << "\n";
+
+ OS.indent(4 + strlen("<CGBitFieldInfo"));
+ OS << " NumComponents:" << getNumComponents();
+ OS << " Components: [";
+ if (getNumComponents()) {
+ OS << "\n";
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ OS.indent(8);
+ OS << "<AccessInfo"
+ << " FieldIndex:" << AI.FieldIndex
+ << " FieldByteOffset:" << AI.FieldByteOffset
+ << " FieldBitStart:" << AI.FieldBitStart
+ << " AccessWidth:" << AI.AccessWidth << "\n";
+ OS.indent(8 + strlen("<AccessInfo"));
+ OS << " AccessAlignment:" << AI.AccessAlignment
+ << " TargetBitOffset:" << AI.TargetBitOffset
+ << " TargetBitWidth:" << AI.TargetBitWidth
+ << ">\n";
+ }
+ OS.indent(4);
+ }
+ OS << "]>";
+}
+
+void CGBitFieldInfo::dump() const {
+ print(llvm::errs());
+}
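
The new NDEBUG block checks two invariants after building a record layout: the LLVM struct's allocation size must equal the AST layout size in bits, and every bit-field access component must fall inside the record. A standalone sketch of the second check, with illustrative stand-ins for clang's CGBitFieldInfo types:

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative stand-in for CGBitFieldInfo::AccessInfo.
struct AccessInfo {
  unsigned FieldIndex;      // LLVM struct element being accessed
  uint64_t FieldByteOffset; // byte offset within that element
  unsigned AccessWidth;     // width of the load/store, in bits
};

// Every access must start and end within the record, mirroring the
// "Invalid bit-field access (out of range)!" assertion above.
static void verifyAccesses(const std::vector<AccessInfo> &Components,
                           const std::vector<uint64_t> &ElemOffsetsInBits,
                           uint64_t RecordSizeInBits) {
  for (unsigned i = 0, e = Components.size(); i != e; ++i) {
    const AccessInfo &AI = Components[i];
    uint64_t AccessBitOffset =
        ElemOffsetsInBits[AI.FieldIndex] + AI.FieldByteOffset * 8;
    assert(AccessBitOffset + AI.AccessWidth <= RecordSizeInBits &&
           "Invalid bit-field access (out of range)!");
    (void)AccessBitOffset; // silence unused-variable warnings under NDEBUG
  }
}
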
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index ae2f791..a914c80d 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -160,7 +160,7 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
EmitStmt(*I);
if (DI) {
- DI->setLocation(S.getLBracLoc());
+ DI->setLocation(S.getRBracLoc());
DI->EmitRegionEnd(CurFn, Builder);
}
@@ -205,6 +205,8 @@ void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
}
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
// Fall out of the current block (if necessary).
EmitBranch(BB);
@@ -225,7 +227,12 @@ void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
}
}
- CurFn->getBasicBlockList().push_back(BB);
+ // Place the block after the current block, if possible, or else at
+ // the end of the function.
+ if (CurBB && CurBB->getParent())
+ CurFn->getBasicBlockList().insertAfter(CurBB, BB);
+ else
+ CurFn->getBasicBlockList().push_back(BB);
Builder.SetInsertPoint(BB);
}
@@ -974,19 +981,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
unsigned InputNo;
for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
- if (Input.hasTiedOperand() &&
- Input.getTiedOperand() == i)
+ if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
break;
}
assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
QualType InputTy = S.getInputExpr(InputNo)->getType();
- QualType OutputTy = OutExpr->getType();
+ QualType OutputType = OutExpr->getType();
uint64_t InputSize = getContext().getTypeSize(InputTy);
- if (getContext().getTypeSize(OutputTy) < InputSize) {
- // Form the asm to return the value as a larger integer type.
- ResultRegTypes.back() = llvm::IntegerType::get(VMContext, (unsigned)InputSize);
+ if (getContext().getTypeSize(OutputType) < InputSize) {
+ // Form the asm to return the value as a larger integer or fp type.
+ ResultRegTypes.back() = ConvertType(InputTy);
}
}
} else {
@@ -1036,17 +1042,20 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// that is usually cheaper, but LLVM IR should really get an anyext someday.
if (Info.hasTiedOperand()) {
unsigned Output = Info.getTiedOperand();
- QualType OutputTy = S.getOutputExpr(Output)->getType();
+ QualType OutputType = S.getOutputExpr(Output)->getType();
QualType InputTy = InputExpr->getType();
- if (getContext().getTypeSize(OutputTy) >
+ if (getContext().getTypeSize(OutputType) >
getContext().getTypeSize(InputTy)) {
// Use ptrtoint as appropriate so that we can do our extension.
if (isa<llvm::PointerType>(Arg->getType()))
Arg = Builder.CreatePtrToInt(Arg,
- llvm::IntegerType::get(VMContext, LLVMPointerWidth));
- unsigned OutputSize = (unsigned)getContext().getTypeSize(OutputTy);
- Arg = Builder.CreateZExt(Arg, llvm::IntegerType::get(VMContext, OutputSize));
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth));
+ const llvm::Type *OutputTy = ConvertType(OutputType);
+ if (isa<llvm::IntegerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, OutputTy);
+ else
+ Arg = Builder.CreateFPExt(Arg, OutputTy);
}
}
@@ -1102,6 +1111,12 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+ // Slap the source location of the inline asm into a !srcloc metadata on the
+ // call.
+ unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
+ llvm::Value *LocIDC =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LocID);
+ Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
// Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
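
The !srcloc metadata above stashes the raw encoding of the asm string's location on the call, so a backend error about the inline asm can be traced back to the source line. A minimal sketch of the decoding side, assuming clang's SourceLocation::getFromRawEncoding (the inverse of the getRawEncoding call used above):

#include "clang/Basic/SourceLocation.h"

// Turn the !srcloc cookie carried on the call back into a SourceLocation
// when a backend diagnostic needs to point at the offending asm.
static clang::SourceLocation decodeSrcLocCookie(unsigned LocID) {
  return clang::SourceLocation::getFromRawEncoding(LocID);
}
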
@@ -1121,14 +1136,23 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// the expression, do the conversion.
if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
const llvm::Type *TruncTy = ResultTruncRegTypes[i];
- // Truncate the integer result to the right size, note that
- // ResultTruncRegTypes can be a pointer.
- uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
- Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext, (unsigned)ResSize));
-
- if (Tmp->getType() != TruncTy) {
- assert(isa<llvm::PointerType>(TruncTy));
+
+ // Truncate the integer result to the right size; note that TruncTy can be
+ // a pointer.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext,
+ (unsigned)ResSize));
Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize = CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(Tmp, llvm::IntegerType::get(VMContext,
+ (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
}
}
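
The conversions above serve GCC-style tied operands: a "0" constraint forces the input into the same register as output 0 even when the types differ in size, so the asm is formed with the wider type and the result is converted back afterwards; the change extends this from integers to floating-point and pointer values. A source-level case that exercises the integer path (illustrative):

// Out is narrower than the tied input In, so IRgen forms the asm to return
// the wider type and truncates the result back to short afterwards.
short f(int In) {
  short Out;
  __asm__("" : "=r"(Out) : "0"(In)); // "0" ties the input to operand 0
  return Out;
}
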
diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp
index 6d38ab9..a8f0467 100644
--- a/lib/CodeGen/CGTemporaries.cpp
+++ b/lib/CodeGen/CGTemporaries.cpp
@@ -23,7 +23,7 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
"Pushed the same temporary twice; AST is likely wrong");
llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
- llvm::Value *CondPtr = 0;
+ llvm::AllocaInst *CondPtr = 0;
// Check if temporaries need to be conditional. If so, we'll create a
// condition boolean, initialize it to 0 and
@@ -32,10 +32,7 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
// Initialize it to false. This initialization takes place right after
// the alloca insert point.
- llvm::StoreInst *SI =
- new llvm::StoreInst(llvm::ConstantInt::getFalse(VMContext), CondPtr);
- llvm::BasicBlock *Block = AllocaInsertPt->getParent();
- Block->getInstList().insertAfter((llvm::Instruction *)AllocaInsertPt, SI);
+ InitTempAlloca(CondPtr, llvm::ConstantInt::getFalse(VMContext));
// Now set it to true.
Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
@@ -64,7 +61,8 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
}
EmitCXXDestructorCall(Info.Temporary->getDestructor(),
- Dtor_Complete, Info.ThisPtr);
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ Info.ThisPtr);
if (CondEnd) {
// Reset the condition to false.
@@ -107,7 +105,7 @@ void CodeGenFunction::PopCXXTemporary() {
}
EmitCXXDestructorCall(Info.Temporary->getDestructor(),
- Dtor_Complete, Info.ThisPtr);
+ Dtor_Complete, /*ForVirtualBase=*/false, Info.ThisPtr);
if (CondEnd) {
// Reset the condition to false.
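
For context on the condition flag: a temporary created on a conditionally evaluated path gets a boolean alloca, now initialized to false via InitTempAlloca at the alloca insert point, set to true when the temporary is actually constructed, and tested before its destructor runs. An illustrative source case:

struct X {
  X() { }
  ~X() { }
  operator bool() const { return true; }
};

// X() is constructed only when b is true, so ~X() must be guarded by the
// runtime condition flag rather than run unconditionally at the end of the
// full-expression.
bool f(bool b) {
  return b && X();
}
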
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 91d9f76..15e5648 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -43,7 +43,7 @@ class VTTBuilder {
/// SubVTTIndicies - The sub-VTT indices for the bases of the most derived
/// class.
- llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies;
+ llvm::DenseMap<BaseSubobject, uint64_t> SubVTTIndicies;
/// SecondaryVirtualPointerIndices - The secondary virtual pointer indices of
/// all subobjects of the most derived class.
@@ -116,8 +116,7 @@ public:
}
/// getSubVTTIndicies - Returns a reference to the sub-VTT indices.
- const llvm::DenseMap<const CXXRecordDecl *, uint64_t> &
- getSubVTTIndicies() const {
+ const llvm::DenseMap<BaseSubobject, uint64_t> &getSubVTTIndicies() const {
return SubVTTIndicies;
}
@@ -179,13 +178,14 @@ void VTTBuilder::AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
// The vtable is a construction vtable, look in the construction vtable
// address points.
AddressPoint = AddressPoints.lookup(Base);
+ assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
} else {
// Just get the address point for the regular vtable.
AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
+ assert(AddressPoint != 0 && "Did not find vtable address point!");
}
if (!AddressPoint) AddressPoint = 0;
- assert(AddressPoint != 0 && "Did not find an address point!");
llvm::Value *Idxs[] = {
llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0),
@@ -340,7 +340,7 @@ void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
if (!IsPrimaryVTT) {
// Remember the sub-VTT index.
- SubVTTIndicies[RD] = VTTComponents.size();
+ SubVTTIndicies[Base] = VTTComponents.size();
}
AddressPointsMapTy AddressPoints;
@@ -434,25 +434,25 @@ bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
}
uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
- const CXXRecordDecl *Base) {
- ClassPairTy ClassPair(RD, Base);
+ BaseSubobject Base) {
+ BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
- SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassPair);
+ SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
if (I != SubVTTIndicies.end())
return I->second;
VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
- for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::const_iterator I =
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
Builder.getSubVTTIndicies().begin(),
E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
// Insert all indices.
- ClassPairTy ClassPair(RD, I->first);
+ BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);
- SubVTTIndicies.insert(std::make_pair(ClassPair, I->second));
+ SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
}
- I = SubVTTIndicies.find(ClassPair);
+ I = SubVTTIndicies.find(ClassSubobjectPair);
assert(I != SubVTTIndicies.end() && "Did not find index!");
return I->second;
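
Keying SubVTTIndicies by BaseSubobject rather than by CXXRecordDecl matters whenever the same class occurs as more than one base subobject: the class alone cannot name a unique sub-VTT, but the (class, offset) pair can. An illustrative hierarchy where the old key was ambiguous:

struct V  { virtual ~V() { } };
struct B  : virtual V { }; // B has a virtual base, so it needs a sub-VTT
struct C1 : B { };
struct C2 : B { };
struct D  : C1, C2 { };    // two distinct B subobjects, at different offsets
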
diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVTables.cpp
index fc6d1a8..159753a 100644
--- a/lib/CodeGen/CGVtable.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -1,4 +1,4 @@
-//===--- CGVtable.cpp - Emit LLVM Code for C++ vtables --------------------===//
+//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -573,8 +573,8 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
}
}
-/// VtableComponent - Represents a single component in a vtable.
-class VtableComponent {
+/// VTableComponent - Represents a single component in a vtable.
+class VTableComponent {
public:
enum Kind {
CK_VCallOffset,
@@ -595,49 +595,49 @@ public:
CK_UnusedFunctionPointer
};
- static VtableComponent MakeVCallOffset(int64_t Offset) {
- return VtableComponent(CK_VCallOffset, Offset);
+ static VTableComponent MakeVCallOffset(int64_t Offset) {
+ return VTableComponent(CK_VCallOffset, Offset);
}
- static VtableComponent MakeVBaseOffset(int64_t Offset) {
- return VtableComponent(CK_VBaseOffset, Offset);
+ static VTableComponent MakeVBaseOffset(int64_t Offset) {
+ return VTableComponent(CK_VBaseOffset, Offset);
}
- static VtableComponent MakeOffsetToTop(int64_t Offset) {
- return VtableComponent(CK_OffsetToTop, Offset);
+ static VTableComponent MakeOffsetToTop(int64_t Offset) {
+ return VTableComponent(CK_OffsetToTop, Offset);
}
- static VtableComponent MakeRTTI(const CXXRecordDecl *RD) {
- return VtableComponent(CK_RTTI, reinterpret_cast<uintptr_t>(RD));
+ static VTableComponent MakeRTTI(const CXXRecordDecl *RD) {
+ return VTableComponent(CK_RTTI, reinterpret_cast<uintptr_t>(RD));
}
- static VtableComponent MakeFunction(const CXXMethodDecl *MD) {
+ static VTableComponent MakeFunction(const CXXMethodDecl *MD) {
assert(!isa<CXXDestructorDecl>(MD) &&
"Don't use MakeFunction with destructors!");
- return VtableComponent(CK_FunctionPointer,
+ return VTableComponent(CK_FunctionPointer,
reinterpret_cast<uintptr_t>(MD));
}
- static VtableComponent MakeCompleteDtor(const CXXDestructorDecl *DD) {
- return VtableComponent(CK_CompleteDtorPointer,
+ static VTableComponent MakeCompleteDtor(const CXXDestructorDecl *DD) {
+ return VTableComponent(CK_CompleteDtorPointer,
reinterpret_cast<uintptr_t>(DD));
}
- static VtableComponent MakeDeletingDtor(const CXXDestructorDecl *DD) {
- return VtableComponent(CK_DeletingDtorPointer,
+ static VTableComponent MakeDeletingDtor(const CXXDestructorDecl *DD) {
+ return VTableComponent(CK_DeletingDtorPointer,
reinterpret_cast<uintptr_t>(DD));
}
- static VtableComponent MakeUnusedFunction(const CXXMethodDecl *MD) {
+ static VTableComponent MakeUnusedFunction(const CXXMethodDecl *MD) {
assert(!isa<CXXDestructorDecl>(MD) &&
"Don't use MakeUnusedFunction with destructors!");
- return VtableComponent(CK_UnusedFunctionPointer,
+ return VTableComponent(CK_UnusedFunctionPointer,
reinterpret_cast<uintptr_t>(MD));
}
- static VtableComponent getFromOpaqueInteger(uint64_t I) {
- return VtableComponent(I);
+ static VTableComponent getFromOpaqueInteger(uint64_t I) {
+ return VTableComponent(I);
}
/// getKind - Get the kind of this vtable component.
@@ -689,7 +689,7 @@ public:
}
private:
- VtableComponent(Kind ComponentKind, int64_t Offset) {
+ VTableComponent(Kind ComponentKind, int64_t Offset) {
assert((ComponentKind == CK_VCallOffset ||
ComponentKind == CK_VBaseOffset ||
ComponentKind == CK_OffsetToTop) && "Invalid component kind!");
@@ -698,7 +698,7 @@ private:
Value = ((Offset << 3) | ComponentKind);
}
- VtableComponent(Kind ComponentKind, uintptr_t Ptr) {
+ VTableComponent(Kind ComponentKind, uintptr_t Ptr) {
assert((ComponentKind == CK_RTTI ||
ComponentKind == CK_FunctionPointer ||
ComponentKind == CK_CompleteDtorPointer ||
@@ -729,7 +729,7 @@ private:
return static_cast<uintptr_t>(Value & ~7ULL);
}
- explicit VtableComponent(uint64_t Value)
+ explicit VTableComponent(uint64_t Value)
: Value(Value) { }
/// The kind is stored in the lower 3 bits of the value. For offsets, we
@@ -855,8 +855,8 @@ private:
ASTContext &Context;
/// Components - vcall and vbase offset components
- typedef llvm::SmallVector<VtableComponent, 64> VtableComponentVectorTy;
- VtableComponentVectorTy Components;
+ typedef llvm::SmallVector<VTableComponent, 64> VTableComponentVectorTy;
+ VTableComponentVectorTy Components;
/// VisitedVirtualBases - Visited virtual bases.
llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
@@ -902,7 +902,7 @@ public:
}
/// Methods for iterating over the components.
- typedef VtableComponentVectorTy::const_reverse_iterator const_iterator;
+ typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
const_iterator components_begin() const { return Components.rbegin(); }
const_iterator components_end() const { return Components.rend(); }
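
As the comment a few hunks above describes, a VTableComponent is a single uint64_t: the kind tag occupies the low 3 bits, offsets are stored shifted left by 3, and pointer payloads rely on 8-byte alignment so the tag can simply be masked off. A standalone sketch of that encoding (illustrative names, not the class itself):

#include <cstdint>

enum Kind {
  CK_VCallOffset, CK_VBaseOffset, CK_OffsetToTop, CK_RTTI,
  CK_FunctionPointer, CK_CompleteDtorPointer, CK_DeletingDtorPointer,
  CK_UnusedFunctionPointer
};

static uint64_t packOffset(Kind K, int64_t Offset) {
  return (uint64_t(Offset) << 3) | K;        // tag in the low 3 bits
}
static uint64_t packPointer(Kind K, uintptr_t Ptr) {
  return uint64_t(Ptr) | K;                  // assumes 8-byte alignment
}
static Kind unpackKind(uint64_t Value) { return Kind(Value & 7); }
static int64_t unpackOffset(uint64_t Value) {
  return int64_t(Value) >> 3;                // arithmetic shift keeps sign
}
static uintptr_t unpackPointer(uint64_t Value) {
  return static_cast<uintptr_t>(Value & ~7ULL);
}
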
@@ -982,30 +982,17 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
// Handle the primary base first.
- if (PrimaryBase) {
- uint64_t PrimaryBaseOffset;
-
+ // We only want to add vcall offsets if the base is non-virtual; a virtual
+ // primary base will have its vcall and vbase offsets emitted already.
+ if (PrimaryBase && !Layout.getPrimaryBaseWasVirtual()) {
// Get the base offset of the primary base.
- if (Layout.getPrimaryBaseWasVirtual()) {
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
- "Primary vbase should have a zero offset!");
-
- const ASTRecordLayout &MostDerivedClassLayout =
- Context.getASTRecordLayout(MostDerivedClass);
-
- PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
- } else {
- assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
- "Primary base should have a zero offset!");
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
- PrimaryBaseOffset = Base.getBaseOffset();
- }
-
- AddVCallOffsets(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
VBaseOffset);
}
-
+
// Add the vcall offsets.
for (CXXRecordDecl::method_iterator I = RD->method_begin(),
E = RD->method_end(); I != E; ++I) {
@@ -1034,7 +1021,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
Offset = (int64_t)(Overrider.Offset - VBaseOffset) / 8;
}
- Components.push_back(VtableComponent::MakeVCallOffset(Offset));
+ Components.push_back(VTableComponent::MakeVCallOffset(Offset));
}
// And iterate over all non-virtual bases (ignoring the primary base).
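
The rewrite above stops recursing into a virtual primary base here: its vcall and vbase offsets are emitted when the virtual base itself is laid out, so emitting them again for the derived class would duplicate them. An illustrative hierarchy with a virtual primary base:

struct A { virtual void f() { } };
struct B : virtual A { };
// A is B's primary base and it is virtual: B shares A's vcall/vbase
// offsets, which are laid out with the virtual base, not with B.
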
@@ -1082,7 +1069,7 @@ void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
int64_t VBaseOffsetOffset = getCurrentOffsetOffset();
VBaseOffsetOffsets.insert(std::make_pair(BaseDecl, VBaseOffsetOffset));
- Components.push_back(VtableComponent::MakeVBaseOffset(Offset));
+ Components.push_back(VTableComponent::MakeVBaseOffset(Offset));
}
// Check the base class looking for more vbase offsets.
@@ -1090,8 +1077,8 @@ void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
}
}
-/// VtableBuilder - Class for building vtable layout information.
-class VtableBuilder {
+/// VTableBuilder - Class for building vtable layout information.
+class VTableBuilder {
public:
/// PrimaryBasesSetVectorTy - A set vector of direct and indirect
/// primary bases.
@@ -1140,7 +1127,7 @@ private:
VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
/// Components - The components of the vtable being built.
- llvm::SmallVector<VtableComponent, 64> Components;
+ llvm::SmallVector<VTableComponent, 64> Components;
/// AddressPoints - Address points for the vtable being built.
AddressPointsMapTy AddressPoints;
@@ -1155,17 +1142,17 @@ private:
/// method.
const uint64_t BaseOffsetInLayoutClass;
- /// VtableIndex - The index in the vtable that this method has.
+ /// VTableIndex - The index in the vtable that this method has.
/// (For destructors, this is the index of the complete destructor).
- const uint64_t VtableIndex;
+ const uint64_t VTableIndex;
MethodInfo(uint64_t BaseOffset, uint64_t BaseOffsetInLayoutClass,
- uint64_t VtableIndex)
+ uint64_t VTableIndex)
: BaseOffset(BaseOffset),
BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
- VtableIndex(VtableIndex) { }
+ VTableIndex(VTableIndex) { }
- MethodInfo() : BaseOffset(0), BaseOffsetInLayoutClass(0), VtableIndex(0) { }
+ MethodInfo() : BaseOffset(0), BaseOffsetInLayoutClass(0), VTableIndex(0) { }
};
typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
@@ -1174,11 +1161,11 @@ private:
/// currently building.
MethodInfoMapTy MethodInfoMap;
- typedef llvm::DenseMap<uint64_t, ThunkInfo> VtableThunksMapTy;
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;
/// VTableThunks - The thunks by vtable index in the vtable currently being
/// built.
- VtableThunksMapTy VTableThunks;
+ VTableThunksMapTy VTableThunks;
typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
@@ -1253,22 +1240,29 @@ private:
uint64_t FirstBaseOffsetInLayoutClass,
PrimaryBasesSetVectorTy &PrimaryBases);
- // LayoutVtable - Layout the vtable for the given base class, including its
+ // LayoutVTable - Layout the vtable for the given base class, including its
// secondary vtables and any vtables for virtual bases.
- void LayoutVtable();
+ void LayoutVTable();
- /// LayoutPrimaryAndSecondaryVtables - Layout the primary vtable for the
+ /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the
/// given base subobject, as well as all its secondary vtables.
- void LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
- bool BaseIsVirtual,
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual
+ /// in the layout class.
+ void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
uint64_t OffsetInLayoutClass);
- /// LayoutSecondaryVtables - Layout the secondary vtables for the given base
+ /// LayoutSecondaryVTables - Layout the secondary vtables for the given base
/// subobject.
///
/// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
/// or a direct or indirect base of a virtual base.
- void LayoutSecondaryVtables(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
uint64_t OffsetInLayoutClass);
/// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
@@ -1277,19 +1271,19 @@ private:
uint64_t OffsetInLayoutClass,
VisitedVirtualBasesSetTy &VBases);
- /// LayoutVtablesForVirtualBases - Layout vtables for all virtual bases of the
+ /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
/// given base (excluding any primary bases).
- void LayoutVtablesForVirtualBases(const CXXRecordDecl *RD,
+ void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
VisitedVirtualBasesSetTy &VBases);
- /// isBuildingConstructionVtable - Return whether this vtable builder is
+ /// isBuildingConstructionVTable - Return whether this vtable builder is
/// building a construction vtable.
- bool isBuildingConstructorVtable() const {
+ bool isBuildingConstructorVTable() const {
return MostDerivedClass != LayoutClass;
}
public:
- VtableBuilder(CodeGenVTables &VTables, const CXXRecordDecl *MostDerivedClass,
+ VTableBuilder(CodeGenVTables &VTables, const CXXRecordDecl *MostDerivedClass,
uint64_t MostDerivedClassOffset, bool MostDerivedClassIsVirtual,
const CXXRecordDecl *LayoutClass)
: VTables(VTables), MostDerivedClass(MostDerivedClass),
@@ -1298,7 +1292,7 @@ public:
LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
- LayoutVtable();
+ LayoutVTable();
}
ThunksMapTy::const_iterator thunks_begin() const {
@@ -1335,11 +1329,11 @@ public:
return AddressPoints.end();
}
- VtableThunksMapTy::const_iterator vtable_thunks_begin() const {
+ VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
return VTableThunks.begin();
}
- VtableThunksMapTy::const_iterator vtable_thunks_end() const {
+ VTableThunksMapTy::const_iterator vtable_thunks_end() const {
return VTableThunks.end();
}
@@ -1347,8 +1341,8 @@ public:
void dumpLayout(llvm::raw_ostream&);
};
-void VtableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
- assert(!isBuildingConstructorVtable() &&
+void VTableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
+ assert(!isBuildingConstructorVTable() &&
"Can't add thunks for construction vtable");
llvm::SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
@@ -1361,27 +1355,26 @@ void VtableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
ThunksVector.push_back(Thunk);
}
-/// OverridesMethodInBases - Checks whether whether this virtual member
-/// function overrides a member function in any of the given bases.
-/// Returns the overridden member function, or null if none was found.
-static const CXXMethodDecl *
-OverridesMethodInBases(const CXXMethodDecl *MD,
- VtableBuilder::PrimaryBasesSetVectorTy &Bases) {
+typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
+
+/// ComputeAllOverriddenMethods - Given a method decl, compute the set of all
+/// methods it overrides, directly or indirectly.
+static void
+ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
+ OverriddenMethodsSetTy& OverriddenMethods) {
+ assert(MD->isVirtual() && "Method is not virtual!");
+
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
E = MD->end_overridden_methods(); I != E; ++I) {
const CXXMethodDecl *OverriddenMD = *I;
- const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent();
- assert(OverriddenMD->isCanonicalDecl() &&
- "Should have the canonical decl of the overridden RD!");
- if (Bases.count(OverriddenRD))
- return OverriddenMD;
+ OverriddenMethods.insert(OverriddenMD);
+
+ ComputeAllOverriddenMethods(OverriddenMD, OverriddenMethods);
}
-
- return 0;
}
-void VtableBuilder::ComputeThisAdjustments() {
+void VTableBuilder::ComputeThisAdjustments() {
// Now go through the method info map and see if any of the methods need
// 'this' pointer adjustments.
for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
@@ -1390,9 +1383,9 @@ void VtableBuilder::ComputeThisAdjustments() {
const MethodInfo &MethodInfo = I->second;
// Ignore adjustments for unused function pointers.
- uint64_t VtableIndex = MethodInfo.VtableIndex;
- if (Components[VtableIndex].getKind() ==
- VtableComponent::CK_UnusedFunctionPointer)
+ uint64_t VTableIndex = MethodInfo.VTableIndex;
+ if (Components[VTableIndex].getKind() ==
+ VTableComponent::CK_UnusedFunctionPointer)
continue;
// Get the final overrider for this method.
@@ -1407,7 +1400,7 @@ void VtableBuilder::ComputeThisAdjustments() {
// While the thunk itself might be needed by vtables in subclasses or
// in construction vtables, there doesn't seem to be a reason for using
// the thunk in this vtable. Still, we do so to match gcc.
- if (VTableThunks.lookup(VtableIndex).Return.isEmpty())
+ if (VTableThunks.lookup(VTableIndex).Return.isEmpty())
continue;
}
@@ -1418,38 +1411,38 @@ void VtableBuilder::ComputeThisAdjustments() {
continue;
// Add it.
- VTableThunks[VtableIndex].This = ThisAdjustment;
+ VTableThunks[VTableIndex].This = ThisAdjustment;
if (isa<CXXDestructorDecl>(MD)) {
// Add an adjustment for the deleting destructor as well.
- VTableThunks[VtableIndex + 1].This = ThisAdjustment;
+ VTableThunks[VTableIndex + 1].This = ThisAdjustment;
}
}
/// Clear the method info map.
MethodInfoMap.clear();
- if (isBuildingConstructorVtable()) {
+ if (isBuildingConstructorVTable()) {
// We don't need to store thunk information for construction vtables.
return;
}
- for (VtableThunksMapTy::const_iterator I = VTableThunks.begin(),
+ for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(),
E = VTableThunks.end(); I != E; ++I) {
- const VtableComponent &Component = Components[I->first];
+ const VTableComponent &Component = Components[I->first];
const ThunkInfo &Thunk = I->second;
const CXXMethodDecl *MD;
switch (Component.getKind()) {
default:
llvm_unreachable("Unexpected vtable component kind!");
- case VtableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_FunctionPointer:
MD = Component.getFunctionDecl();
break;
- case VtableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
MD = Component.getDestructorDecl();
break;
- case VtableComponent::CK_DeletingDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer:
// We've already added the thunk when we saw the complete dtor pointer.
continue;
}
@@ -1459,7 +1452,7 @@ void VtableBuilder::ComputeThisAdjustments() {
}
}
-ReturnAdjustment VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
+ReturnAdjustment VTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
ReturnAdjustment Adjustment;
if (!Offset.isEmpty()) {
@@ -1474,11 +1467,6 @@ ReturnAdjustment VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
Offset.VirtualBase);
}
-
- // FIXME: Once the assert in getVirtualBaseOffsetOffset is back again,
- // we can get rid of this assert.
- assert(Adjustment.VBaseOffsetOffset != 0 &&
- "Invalid vbase offset offset!");
}
Adjustment.NonVirtual = Offset.NonVirtualOffset;
@@ -1488,7 +1476,7 @@ ReturnAdjustment VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
}
BaseOffset
-VtableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
BaseSubobject Derived) const {
const CXXRecordDecl *BaseRD = Base.getBase();
const CXXRecordDecl *DerivedRD = Derived.getBase();
@@ -1540,7 +1528,7 @@ VtableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
}
ThisAdjustment
-VtableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
+VTableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
uint64_t BaseOffsetInLayoutClass,
FinalOverriders::OverriderInfo Overrider) {
// Ignore adjustments for pure virtual member functions.
@@ -1587,22 +1575,22 @@ VtableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
}
void
-VtableBuilder::AddMethod(const CXXMethodDecl *MD,
+VTableBuilder::AddMethod(const CXXMethodDecl *MD,
ReturnAdjustment ReturnAdjustment) {
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
assert(ReturnAdjustment.isEmpty() &&
"Destructor can't have return adjustment!");
// Add both the complete destructor and the deleting destructor.
- Components.push_back(VtableComponent::MakeCompleteDtor(DD));
- Components.push_back(VtableComponent::MakeDeletingDtor(DD));
+ Components.push_back(VTableComponent::MakeCompleteDtor(DD));
+ Components.push_back(VTableComponent::MakeDeletingDtor(DD));
} else {
// Add the return adjustment if necessary.
if (!ReturnAdjustment.isEmpty())
VTableThunks[Components.size()].Return = ReturnAdjustment;
// Add the function.
- Components.push_back(VtableComponent::MakeFunction(MD));
+ Components.push_back(VTableComponent::MakeFunction(MD));
}
}
@@ -1616,19 +1604,16 @@ VtableBuilder::AddMethod(const CXXMethodDecl *MD,
/// struct C : B { virtual void f(); }
///
/// OverridesIndirectMethodInBase will return true if given C::f as the method
-/// and { A } as the set of bases.
+/// and { A } as the set of bases.
static bool
OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
- VtableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ if (Bases.count(MD->getParent()))
+ return true;
+
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
E = MD->end_overridden_methods(); I != E; ++I) {
const CXXMethodDecl *OverriddenMD = *I;
- const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent();
- assert(OverriddenMD->isCanonicalDecl() &&
- "Should have the canonical decl of the overridden RD!");
-
- if (Bases.count(OverriddenRD))
- return true;
// Check "indirect overriders".
if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
@@ -1639,7 +1624,7 @@ OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
}
bool
-VtableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
+VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
uint64_t BaseOffsetInLayoutClass,
const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
uint64_t FirstBaseOffsetInLayoutClass) const {
@@ -1657,7 +1642,7 @@ VtableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
if (Overrider->getParent() == FirstBaseInPrimaryBaseChain)
return true;
- VtableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain;
PrimaryBases.insert(RD);
@@ -1705,15 +1690,18 @@ VtableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
/// from the nearest base. Returns null if no method was found.
static const CXXMethodDecl *
FindNearestOverriddenMethod(const CXXMethodDecl *MD,
- VtableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ OverriddenMethodsSetTy OverriddenMethods;
+ ComputeAllOverriddenMethods(MD, OverriddenMethods);
+
for (int I = Bases.size(), E = 0; I != E; --I) {
const CXXRecordDecl *PrimaryBase = Bases[I - 1];
// Now check the overridden methods.
- for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
- E = MD->end_overridden_methods(); I != E; ++I) {
+ for (OverriddenMethodsSetTy::const_iterator I = OverriddenMethods.begin(),
+ E = OverriddenMethods.end(); I != E; ++I) {
const CXXMethodDecl *OverriddenMD = *I;
-
+
// We found our overridden method.
if (OverriddenMD->getParent() == PrimaryBase)
return OverriddenMD;
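
FindNearestOverriddenMethod now searches the full transitive set built by ComputeAllOverriddenMethods instead of only the directly overridden methods, so an override reached through an intermediate class is still matched against a primary base. A minimal standalone sketch of the closure (illustrative types; the clang version recurses unconditionally, while the insert check here merely skips rework on diamond hierarchies):

#include <set>
#include <vector>

struct Method {
  std::vector<const Method *> Overridden; // direct overrides only
};

// Collect every method MD overrides, directly or indirectly, by walking
// the direct-override edges transitively.
static void computeAllOverridden(const Method *MD,
                                 std::set<const Method *> &Out) {
  for (std::vector<const Method *>::const_iterator
         I = MD->Overridden.begin(), E = MD->Overridden.end(); I != E; ++I)
    if (Out.insert(*I).second)
      computeAllOverridden(*I, Out);
}
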
@@ -1724,7 +1712,7 @@ FindNearestOverriddenMethod(const CXXMethodDecl *MD,
}
void
-VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
+VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
uint64_t FirstBaseOffsetInLayoutClass,
PrimaryBasesSetVectorTy &PrimaryBases) {
@@ -1792,7 +1780,7 @@ VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
MethodInfo MethodInfo(Base.getBaseOffset(),
BaseOffsetInLayoutClass,
- OverriddenMethodInfo.VtableIndex);
+ OverriddenMethodInfo.VTableIndex);
assert(!MethodInfoMap.count(MD) &&
"Should not have method info for this method yet!");
@@ -1804,7 +1792,7 @@ VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
// or indirect base class of a virtual base class, we need to emit a
// thunk if we ever have a class hierarchy where the base class is not
// a primary base in the complete object.
- if (!isBuildingConstructorVtable() && OverriddenMD != MD) {
+ if (!isBuildingConstructorVTable() && OverriddenMD != MD) {
// Compute the this adjustment.
ThisAdjustment ThisAdjustment =
ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
@@ -1835,7 +1823,7 @@ VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass,
FirstBaseInPrimaryBaseChain,
FirstBaseOffsetInLayoutClass)) {
- Components.push_back(VtableComponent::MakeUnusedFunction(OverriderMD));
+ Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD));
continue;
}
@@ -1850,8 +1838,9 @@ VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
}
}
-void VtableBuilder::LayoutVtable() {
- LayoutPrimaryAndSecondaryVtables(BaseSubobject(MostDerivedClass, 0),
+void VTableBuilder::LayoutVTable() {
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass, 0),
+ /*BaseIsMorallyVirtual=*/false,
MostDerivedClassIsVirtual,
MostDerivedClassOffset);
@@ -1862,22 +1851,24 @@ void VtableBuilder::LayoutVtable() {
VBases);
VBases.clear();
- LayoutVtablesForVirtualBases(MostDerivedClass, VBases);
+ LayoutVTablesForVirtualBases(MostDerivedClass, VBases);
}
void
-VtableBuilder::LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
- bool BaseIsVirtual,
+VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
uint64_t OffsetInLayoutClass) {
assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
// Add vcall and vbase offsets for this vtable.
VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
- Base, BaseIsVirtual, OffsetInLayoutClass);
+ Base, BaseIsVirtualInLayoutClass,
+ OffsetInLayoutClass);
Components.append(Builder.components_begin(), Builder.components_end());
// Check if we need to add these vcall offsets.
- if (BaseIsVirtual && !Builder.getVCallOffsets().empty()) {
+ if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) {
VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
if (VCallOffsets.empty())
@@ -1893,10 +1884,10 @@ VtableBuilder::LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
// FIXME: We should not use / 8 here.
int64_t OffsetToTop = -(int64_t)(OffsetInLayoutClass -
MostDerivedClassOffset) / 8;
- Components.push_back(VtableComponent::MakeOffsetToTop(OffsetToTop));
+ Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop));
// Next, add the RTTI.
- Components.push_back(VtableComponent::MakeRTTI(MostDerivedClass));
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
uint64_t AddressPoint = Components.size();
@@ -1936,15 +1927,11 @@ VtableBuilder::LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
RD = PrimaryBase;
}
- bool BaseIsMorallyVirtual = BaseIsVirtual;
- if (isBuildingConstructorVtable() && Base.getBase() == MostDerivedClass)
- BaseIsMorallyVirtual = false;
-
// Layout secondary vtables.
- LayoutSecondaryVtables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
+ LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
}
-void VtableBuilder::LayoutSecondaryVtables(BaseSubobject Base,
+void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
bool BaseIsMorallyVirtual,
uint64_t OffsetInLayoutClass) {
// Itanium C++ ABI 2.5.2:
@@ -1969,7 +1956,7 @@ void VtableBuilder::LayoutSecondaryVtables(BaseSubobject Base,
if (!BaseDecl->isDynamicClass())
continue;
- if (isBuildingConstructorVtable()) {
+ if (isBuildingConstructorVTable()) {
// Itanium C++ ABI 2.6.4:
// Some of the base class subobjects may not need construction virtual
// tables, which will therefore not be present in the construction
@@ -1988,20 +1975,21 @@ void VtableBuilder::LayoutSecondaryVtables(BaseSubobject Base,
// Don't emit a secondary vtable for a primary base. We might however want
// to emit secondary vtables for other bases of this base.
if (BaseDecl == PrimaryBase) {
- LayoutSecondaryVtables(BaseSubobject(BaseDecl, BaseOffset),
+ LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
continue;
}
// Layout the primary vtable (and any secondary vtables) for this base.
- LayoutPrimaryAndSecondaryVtables(BaseSubobject(BaseDecl, BaseOffset),
- /*BaseIsVirtual=*/false,
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual,
+ /*BaseIsVirtualInLayoutClass=*/false,
BaseOffsetInLayoutClass);
}
}
void
-VtableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
uint64_t OffsetInLayoutClass,
VisitedVirtualBasesSetTy &VBases) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -2013,7 +2001,7 @@ VtableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
if (Layout.getPrimaryBaseWasVirtual()) {
bool IsPrimaryVirtualBase = true;
- if (isBuildingConstructorVtable()) {
+ if (isBuildingConstructorVTable()) {
// Check if the base is actually a primary base in the class we use for
// layout.
const ASTRecordLayout &LayoutClassLayout =
@@ -2059,7 +2047,7 @@ VtableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
}
void
-VtableBuilder::LayoutVtablesForVirtualBases(const CXXRecordDecl *RD,
+VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
VisitedVirtualBasesSetTy &VBases) {
// Itanium C++ ABI 2.5.2:
// Then come the virtual base virtual tables, also in inheritance graph
@@ -2084,22 +2072,23 @@ VtableBuilder::LayoutVtablesForVirtualBases(const CXXRecordDecl *RD,
uint64_t BaseOffsetInLayoutClass =
LayoutClassLayout.getVBaseClassOffset(BaseDecl);
- LayoutPrimaryAndSecondaryVtables(BaseSubobject(BaseDecl, BaseOffset),
- /*BaseIsVirtual=*/true,
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsMorallyVirtual=*/true,
+ /*BaseIsVirtualInLayoutClass=*/true,
BaseOffsetInLayoutClass);
}
// We only need to check the base for virtual base vtables if it actually
// has virtual bases.
if (BaseDecl->getNumVBases())
- LayoutVtablesForVirtualBases(BaseDecl, VBases);
+ LayoutVTablesForVirtualBases(BaseDecl, VBases);
}
}
/// dumpLayout - Dump the vtable layout.
-void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
+void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
- if (isBuildingConstructorVtable()) {
+ if (isBuildingConstructorVTable()) {
Out << "Construction vtable for ('";
Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
// FIXME: Don't use / 8.
@@ -2129,28 +2118,28 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
Out << llvm::format("%4d | ", I);
- const VtableComponent &Component = Components[I];
+ const VTableComponent &Component = Components[I];
// Dump the component.
switch (Component.getKind()) {
- case VtableComponent::CK_VCallOffset:
+ case VTableComponent::CK_VCallOffset:
Out << "vcall_offset (" << Component.getVCallOffset() << ")";
break;
- case VtableComponent::CK_VBaseOffset:
+ case VTableComponent::CK_VBaseOffset:
Out << "vbase_offset (" << Component.getVBaseOffset() << ")";
break;
- case VtableComponent::CK_OffsetToTop:
+ case VTableComponent::CK_OffsetToTop:
Out << "offset_to_top (" << Component.getOffsetToTop() << ")";
break;
- case VtableComponent::CK_RTTI:
+ case VTableComponent::CK_RTTI:
Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
break;
- case VtableComponent::CK_FunctionPointer: {
+ case VTableComponent::CK_FunctionPointer: {
const CXXMethodDecl *MD = Component.getFunctionDecl();
std::string Str =
@@ -2192,10 +2181,10 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
break;
}
- case VtableComponent::CK_CompleteDtorPointer:
- case VtableComponent::CK_DeletingDtorPointer: {
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
bool IsComplete =
- Component.getKind() == VtableComponent::CK_CompleteDtorPointer;
+ Component.getKind() == VTableComponent::CK_CompleteDtorPointer;
const CXXDestructorDecl *DD = Component.getDestructorDecl();
@@ -2227,7 +2216,7 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
break;
}
- case VtableComponent::CK_UnusedFunctionPointer: {
+ case VTableComponent::CK_UnusedFunctionPointer: {
const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
std::string Str =
@@ -2279,7 +2268,7 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
Out << '\n';
- if (isBuildingConstructorVtable())
+ if (isBuildingConstructorVTable())
return;
if (MostDerivedClass->getNumVBases()) {
@@ -2373,7 +2362,7 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
}
-void CodeGenVTables::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
+void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
// Itanium C++ ABI 2.5.2:
// The order of the virtual function pointers in a virtual table is the
@@ -2400,7 +2389,7 @@ void CodeGenVTables::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
// Collect all the primary bases, so we can check whether methods override
// a method from the base.
- VtableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
for (ASTRecordLayout::primary_base_info_iterator
I = Layout.primary_base_begin(), E = Layout.primary_base_end();
I != E; ++I)
@@ -2418,7 +2407,7 @@ void CodeGenVTables::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
// Check if this method overrides a method in the primary base.
if (const CXXMethodDecl *OverriddenMD =
- OverridesMethodInBases(MD, PrimaryBases)) {
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
// Check if converting from the return type of the method to the
// return type of the overridden method requires conversion.
if (ComputeReturnAdjustmentBaseOffset(CGM.getContext(), MD,
@@ -2430,12 +2419,12 @@ void CodeGenVTables::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
cast<CXXDestructorDecl>(OverriddenMD);
// Add both the complete and deleting entries.
- MethodVtableIndices[GlobalDecl(DD, Dtor_Complete)] =
- getMethodVtableIndex(GlobalDecl(OverriddenDD, Dtor_Complete));
- MethodVtableIndices[GlobalDecl(DD, Dtor_Deleting)] =
- getMethodVtableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting));
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Complete));
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting));
} else {
- MethodVtableIndices[MD] = getMethodVtableIndex(OverriddenMD);
+ MethodVTableIndices[MD] = getMethodVTableIndex(OverriddenMD);
}
// We don't need to add an entry for this method.
@@ -2452,13 +2441,13 @@ void CodeGenVTables::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
}
// Add the complete dtor.
- MethodVtableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++;
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++;
// Add the deleting dtor.
- MethodVtableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++;
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++;
} else {
// Add the entry.
- MethodVtableIndices[MD] = CurrentIndex++;
+ MethodVTableIndices[MD] = CurrentIndex++;
}
}
@@ -2468,11 +2457,11 @@ void CodeGenVTables::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
// its entries come after the declared virtual function pointers.
// Add the complete dtor.
- MethodVtableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] =
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] =
CurrentIndex++;
// Add the deleting dtor.
- MethodVtableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] =
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] =
CurrentIndex++;
}
@@ -2485,24 +2474,24 @@ uint64_t CodeGenVTables::getNumVirtualFunctionPointers(const CXXRecordDecl *RD)
if (I != NumVirtualFunctionPointers.end())
return I->second;
- ComputeMethodVtableIndices(RD);
+ ComputeMethodVTableIndices(RD);
I = NumVirtualFunctionPointers.find(RD);
assert(I != NumVirtualFunctionPointers.end() && "Did not find entry!");
return I->second;
}
-uint64_t CodeGenVTables::getMethodVtableIndex(GlobalDecl GD) {
- MethodVtableIndicesTy::iterator I = MethodVtableIndices.find(GD);
- if (I != MethodVtableIndices.end())
+uint64_t CodeGenVTables::getMethodVTableIndex(GlobalDecl GD) {
+ MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
+ if (I != MethodVTableIndices.end())
return I->second;
const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
- ComputeMethodVtableIndices(RD);
+ ComputeMethodVTableIndices(RD);
- I = MethodVtableIndices.find(GD);
- assert(I != MethodVtableIndices.end() && "Did not find index!");
+ I = MethodVTableIndices.find(GD);
+ assert(I != MethodVTableIndices.end() && "Did not find index!");
return I->second;
}
@@ -2530,13 +2519,6 @@ int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
}
I = VirtualBaseClassOffsetOffsets.find(ClassPair);
-
- // FIXME: The assertion below assertion currently fails with the old vtable
- /// layout code if there is a non-virtual thunk adjustment in a vtable.
- // Once the new layout is in place, this return should be removed.
- if (I == VirtualBaseClassOffsetOffsets.end())
- return 0;
-
assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!");
return I->second;
@@ -2544,6 +2526,9 @@ int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
uint64_t
CodeGenVTables::getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD) {
+ assert(AddressPoints.count(std::make_pair(RD, Base)) &&
+ "Did not find address point!");
+
uint64_t AddressPoint = AddressPoints.lookup(std::make_pair(RD, Base));
assert(AddressPoint && "Address point must not be zero!");
@@ -2562,7 +2547,7 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
else
getMangleContext().mangleThunk(MD, Thunk, Name);
- const llvm::Type *Ty = getTypes().GetFunctionTypeForVtable(MD);
+ const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(MD);
return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl());
}
@@ -2745,7 +2730,7 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk)
// There's already a declaration with the same name, check if it has the same
// type or if we need to replace it.
if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() !=
- CGM.getTypes().GetFunctionTypeForVtable(MD)) {
+ CGM.getTypes().GetFunctionTypeForVTable(MD)) {
llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry);
// If the types mismatch then we have to rewrite the definition.
@@ -2804,7 +2789,7 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
if (LayoutData)
return;
- VtableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
+ VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
// Add the VTable layout.
uint64_t NumVTableComponents = Builder.getNumVTableComponents();
@@ -2833,7 +2818,7 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
std::sort(VTableThunks.begin(), VTableThunks.end());
// Add the address points.
- for (VtableBuilder::AddressPointsMapTy::const_iterator I =
+ for (VTableBuilder::AddressPointsMapTy::const_iterator I =
Builder.address_points_begin(), E = Builder.address_points_end();
I != E; ++I) {
@@ -2858,7 +2843,7 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
return;
- for (VtableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ for (VTableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
Builder.getVBaseOffsetOffsets().begin(),
E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
// Insert all types.
@@ -2888,43 +2873,43 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
llvm::Constant* PureVirtualFn = 0;
for (unsigned I = 0; I != NumComponents; ++I) {
- VtableComponent Component =
- VtableComponent::getFromOpaqueInteger(Components[I]);
+ VTableComponent Component =
+ VTableComponent::getFromOpaqueInteger(Components[I]);
llvm::Constant *Init = 0;
switch (Component.getKind()) {
- case VtableComponent::CK_VCallOffset:
+ case VTableComponent::CK_VCallOffset:
Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVCallOffset());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
- case VtableComponent::CK_VBaseOffset:
+ case VTableComponent::CK_VBaseOffset:
Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVBaseOffset());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
- case VtableComponent::CK_OffsetToTop:
+ case VTableComponent::CK_OffsetToTop:
Init = llvm::ConstantInt::get(PtrDiffTy, Component.getOffsetToTop());
Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
break;
- case VtableComponent::CK_RTTI:
+ case VTableComponent::CK_RTTI:
Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
break;
- case VtableComponent::CK_FunctionPointer:
- case VtableComponent::CK_CompleteDtorPointer:
- case VtableComponent::CK_DeletingDtorPointer: {
+ case VTableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
GlobalDecl GD;
// Get the right global decl.
switch (Component.getKind()) {
default:
llvm_unreachable("Unexpected vtable component kind");
- case VtableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_FunctionPointer:
GD = Component.getFunctionDecl();
break;
- case VtableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
break;
- case VtableComponent::CK_DeletingDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer:
GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
break;
}
@@ -2953,7 +2938,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
NextVTableThunkIndex++;
} else {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVtable(MD);
+ const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(MD);
Init = CGM.GetAddrOfFunction(GD, Ty);
}
@@ -2963,7 +2948,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
break;
}
- case VtableComponent::CK_UnusedFunctionPointer:
+ case VTableComponent::CK_UnusedFunctionPointer:
Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
break;
};
@@ -3020,7 +3005,7 @@ GetGlobalVariable(llvm::Module &Module, llvm::StringRef Name,
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXVtable(RD, OutName);
+ CGM.getMangleContext().mangleCXXVTable(RD, OutName);
llvm::StringRef Name = OutName.str();
ComputeVTableRelatedInformation(RD);
@@ -3038,8 +3023,8 @@ CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
// Dump the vtable layout if necessary.
- if (CGM.getLangOptions().DumpVtableLayouts) {
- VtableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
+ if (CGM.getLangOptions().DumpVTableLayouts) {
+ VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
Builder.dumpLayout(llvm::errs());
}
@@ -3064,11 +3049,11 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
const BaseSubobject &Base,
bool BaseIsVirtual,
VTableAddressPointsMapTy& AddressPoints) {
- VtableBuilder Builder(*this, Base.getBase(), Base.getBaseOffset(),
+ VTableBuilder Builder(*this, Base.getBase(), Base.getBaseOffset(),
/*MostDerivedClassIsVirtual=*/BaseIsVirtual, RD);
// Dump the vtable layout if necessary.
- if (CGM.getLangOptions().DumpVtableLayouts)
+ if (CGM.getLangOptions().DumpVTableLayouts)
Builder.dumpLayout(llvm::errs());
// Add the address points.
@@ -3077,7 +3062,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Get the mangled construction vtable name.
llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXCtorVtable(RD, Base.getBaseOffset() / 8,
+ CGM.getMangleContext().mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8,
Base.getBase(), OutName);
llvm::StringRef Name = OutName.str();
@@ -3111,9 +3096,9 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
void
CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
- llvm::GlobalVariable *&VTable = Vtables[RD];
+ llvm::GlobalVariable *&VTable = VTables[RD];
if (VTable) {
- assert(VTable->getInitializer() && "Vtable doesn't have a definition!");
+ assert(VTable->getInitializer() && "VTable doesn't have a definition!");
return;
}
@@ -3121,6 +3106,18 @@ CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
EmitVTableDefinition(VTable, Linkage, RD);
GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
+
+ // If this is the magic class __cxxabiv1::__fundamental_type_info,
+ // we will emit the typeinfo for the fundamental types. This is the
+ // same behaviour as GCC.
+ const DeclContext *DC = RD->getDeclContext();
+ if (RD->getIdentifier() &&
+ RD->getIdentifier()->isStr("__fundamental_type_info") &&
+ isa<NamespaceDecl>(DC) &&
+ cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
+ DC->getParent()->isTranslationUnit())
+ CGM.EmitFundamentalRTTIDescriptors();
}
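This hook matches the point where the C++ ABI support library defines its own RTTI classes. A minimal illustrative sketch of the pattern it looks for (the real definition lives in the ABI library, e.g. libsupc++/libc++abi; class body abbreviated):

    #include <typeinfo>
    namespace __cxxabiv1 {
      // Defining the key function (the destructor) of this class in the ABI
      // library is what makes GenerateClassData() hit the check above and
      // emit type_info objects for all fundamental types into that TU.
      class __fundamental_type_info : public std::type_info {
      public:
        virtual ~__fundamental_type_info();
      };
    }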
void CodeGenVTables::EmitVTableRelatedData(GlobalDecl GD) {
@@ -3160,11 +3157,11 @@ void CodeGenVTables::EmitVTableRelatedData(GlobalDecl GD) {
return;
}
- if (Vtables.count(RD))
+ if (VTables.count(RD))
return;
if (RDKind == TSK_ImplicitInstantiation)
- CGM.DeferredVtables.push_back(RD);
+ CGM.DeferredVTables.push_back(RD);
else
- GenerateClassData(CGM.getVtableLinkage(RD), RD);
+ GenerateClassData(CGM.getVTableLinkage(RD), RD);
}
diff --git a/lib/CodeGen/CGVtable.h b/lib/CodeGen/CGVTables.h
index 6073555..6c18ca8 100644
--- a/lib/CodeGen/CGVtable.h
+++ b/lib/CodeGen/CGVTables.h
@@ -1,4 +1,4 @@
-//===--- CGVtable.h - Emit LLVM Code for C++ vtables ----------------------===//
+//===--- CGVTables.h - Emit LLVM Code for C++ vtables ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -180,10 +180,10 @@ namespace CodeGen {
class CodeGenVTables {
CodeGenModule &CGM;
- /// MethodVtableIndices - Contains the index (relative to the vtable address
+ /// MethodVTableIndices - Contains the index (relative to the vtable address
/// point) where the function pointer for a virtual function is stored.
- typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVtableIndicesTy;
- MethodVtableIndicesTy MethodVtableIndices;
+ typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;
+ MethodVTableIndicesTy MethodVTableIndices;
typedef std::pair<const CXXRecordDecl *,
const CXXRecordDecl *> ClassPairTy;
@@ -195,8 +195,8 @@ class CodeGenVTables {
VirtualBaseClassOffsetOffsetsMapTy;
VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets;
- /// Vtables - All the vtables which have been defined.
- llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> Vtables;
+ /// VTables - All the vtables which have been defined.
+ llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
/// NumVirtualFunctionPointers - Contains the number of virtual function
/// pointers in the vtable for a given record decl.
@@ -216,8 +216,8 @@ class CodeGenVTables {
/// integers are the vtable components.
VTableLayoutMapTy VTableLayoutMap;
- typedef llvm::DenseMap<std::pair<const CXXRecordDecl *,
- BaseSubobject>, uint64_t> AddressPointsMapTy;
+ typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> AddressPointsMapTy;
/// Address points - Address points for all vtables.
AddressPointsMapTy AddressPoints;
@@ -247,14 +247,12 @@ class CodeGenVTables {
return &Components[1];
}
- typedef llvm::DenseMap<ClassPairTy, uint64_t> SubVTTIndiciesMapTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
/// SubVTTIndicies - Contains indices into the various sub-VTTs.
SubVTTIndiciesMapTy SubVTTIndicies;
-
- typedef llvm::DenseMap<std::pair<const CXXRecordDecl *,
- BaseSubobject>, uint64_t>
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t>
SecondaryVirtualPointerIndicesMapTy;
/// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer
@@ -265,7 +263,7 @@ class CodeGenVTables {
/// pointers in the vtable for a given record decl.
uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
- void ComputeMethodVtableIndices(const CXXRecordDecl *RD);
+ void ComputeMethodVTableIndices(const CXXRecordDecl *RD);
llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
bool GenerateDefinition,
@@ -295,6 +293,15 @@ public:
CodeGenVTables(CodeGenModule &CGM)
: CGM(CGM) { }
+ // isKeyFunctionInAnotherTU - True if this record has a key function and it is
+ // in another translation unit.
+ static bool isKeyFunctionInAnotherTU(ASTContext &Context,
+ const CXXRecordDecl *RD) {
+ assert(RD->isDynamicClass() && "Non-dynamic classes have no key.");
+ const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
+ return KeyFunction && !KeyFunction->getBody();
+ }
+
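For illustration, a class whose vtable this predicate reports as owned by another translation unit (assuming A::anchor() is defined elsewhere):

    // a.h
    struct A {
      virtual void anchor();            // key function: first non-inline virtual
      virtual int get() { return 0; }
    };
    // In any TU that only includes a.h, Context.getKeyFunction(A) yields a
    // declaration with no body, so isKeyFunctionInAnotherTU() returns true
    // and the vtable is not emitted here.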
/// needsVTTParameter - Return whether the given global decl needs a VTT
/// parameter, which it does if it's a base constructor or destructor with
/// virtual bases.
@@ -302,17 +309,17 @@ public:
/// getSubVTTIndex - Return the index of the sub-VTT for the base class of the
/// given record decl.
- uint64_t getSubVTTIndex(const CXXRecordDecl *RD, const CXXRecordDecl *Base);
+ uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base);
/// getSecondaryVirtualPointerIndex - Return the index in the VTT where the
/// virtual pointer for the given subobject is located.
uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
BaseSubobject Base);
- /// getMethodVtableIndex - Return the index (relative to the vtable address
+ /// getMethodVTableIndex - Return the index (relative to the vtable address
/// point) where the function pointer for the given virtual function is
/// stored.
- uint64_t getMethodVtableIndex(GlobalDecl GD);
+ uint64_t getMethodVTableIndex(GlobalDecl GD);
/// getVirtualBaseOffsetOffset - Return the offset in bytes (relative to the
/// vtable address point) where the offset of the virtual base that contains
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 91fb714..92ef9dc 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -34,69 +34,64 @@ namespace CodeGen {
/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
/// address of an aggregate value in memory.
class RValue {
- llvm::Value *V1, *V2;
- // TODO: Encode this into the low bit of pointer for more efficient
- // return-by-value.
- enum { Scalar, Complex, Aggregate } Flavor;
+ enum Flavor { Scalar, Complex, Aggregate };
- bool Volatile:1;
-public:
+ // Stores first value and flavor.
+ llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
+ // Stores second value and volatility.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
- bool isScalar() const { return Flavor == Scalar; }
- bool isComplex() const { return Flavor == Complex; }
- bool isAggregate() const { return Flavor == Aggregate; }
+public:
+ bool isScalar() const { return V1.getInt() == Scalar; }
+ bool isComplex() const { return V1.getInt() == Complex; }
+ bool isAggregate() const { return V1.getInt() == Aggregate; }
- bool isVolatileQualified() const { return Volatile; }
+ bool isVolatileQualified() const { return V2.getInt(); }
/// getScalarVal() - Return the Value* of this scalar value.
llvm::Value *getScalarVal() const {
assert(isScalar() && "Not a scalar!");
- return V1;
+ return V1.getPointer();
}
/// getComplexVal - Return the real/imag components of this complex value.
///
std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
- return std::pair<llvm::Value *, llvm::Value *>(V1, V2);
+ return std::make_pair(V1.getPointer(), V2.getPointer());
}
/// getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value *getAggregateAddr() const {
assert(isAggregate() && "Not an aggregate!");
- return V1;
+ return V1.getPointer();
}
static RValue get(llvm::Value *V) {
RValue ER;
- ER.V1 = V;
- ER.Flavor = Scalar;
- ER.Volatile = false;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Scalar);
+ ER.V2.setInt(false);
return ER;
}
static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
RValue ER;
- ER.V1 = V1;
- ER.V2 = V2;
- ER.Flavor = Complex;
- ER.Volatile = false;
+ ER.V1.setPointer(V1);
+ ER.V2.setPointer(V2);
+ ER.V1.setInt(Complex);
+ ER.V2.setInt(false);
return ER;
}
static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
- RValue ER;
- ER.V1 = C.first;
- ER.V2 = C.second;
- ER.Flavor = Complex;
- ER.Volatile = false;
- return ER;
+ return getComplex(C.first, C.second);
}
// FIXME: Aggregate rvalues need to retain information about whether they are
// volatile or not. Remove default to find all places that probably get this
// wrong.
- static RValue getAggregate(llvm::Value *V, bool Vol = false) {
+ static RValue getAggregate(llvm::Value *V, bool Volatile = false) {
RValue ER;
- ER.V1 = V;
- ER.Flavor = Aggregate;
- ER.Volatile = Vol;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Aggregate);
+ ER.V2.setInt(Volatile);
return ER;
}
};
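The rewrite packs the flavor tag and the volatile bit into the spare low bits of the two Value pointers, shrinking RValue from three words plus flags to two words. A minimal sketch of the llvm::PointerIntPair idiom it relies on, assuming the pointee alignment leaves two low bits free:

    #include "llvm/ADT/PointerIntPair.h"

    enum Flavor { Scalar, Complex, Aggregate };   // needs 2 bits

    void demo(int *V) {
      // One pointer-sized word holds both the pointer and the 2-bit tag.
      llvm::PointerIntPair<int *, 2, Flavor> P;
      P.setPointer(V);
      P.setInt(Complex);
      int *Raw = P.getPointer();   // original pointer, tag bits masked off
      Flavor F = P.getInt();       // the stored tag
      (void)Raw; (void)F;
    }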
@@ -220,7 +215,7 @@ public:
}
// bitfield lvalue
- llvm::Value *getBitFieldAddr() const {
+ llvm::Value *getBitFieldBaseAddr() const {
assert(isBitField());
return V;
}
@@ -269,11 +264,17 @@ public:
return R;
}
- static LValue MakeBitfield(llvm::Value *V, const CGBitFieldInfo &Info,
+ /// \brief Create a new object to represent a bit-field access.
+ ///
+ /// \param BaseValue - The base address of the structure containing the
+ /// bit-field.
+ /// \param Info - The information describing how to perform the bit-field
+ /// access.
+ static LValue MakeBitfield(llvm::Value *BaseValue, const CGBitFieldInfo &Info,
unsigned CVR) {
LValue R;
R.LVType = BitField;
- R.V = V;
+ R.V = BaseValue;
R.BitFieldInfo = &Info;
R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
return R;
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index e72a1d9..dfd2a39 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -23,7 +23,7 @@ add_clang_library(clangCodeGen
CGRTTI.cpp
CGStmt.cpp
CGTemporaries.cpp
- CGVtable.cpp
+ CGVTables.cpp
CGVTT.cpp
CodeGenFunction.cpp
CodeGenModule.cpp
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index f38d8a1..d3bf164 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -246,15 +246,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
-
- Stmt *Body = FD->getBody();
- if (Body)
- EmitStmt(Body);
- else {
- assert(FD->isImplicit() && "non-implicit function def has no body");
- assert(FD->isCopyAssignment() && "implicit function not copy assignment");
- SynthesizeCXXCopyAssignment(Args);
- }
+ assert(FD->getBody());
+ EmitStmt(FD->getBody());
}
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
@@ -480,6 +473,14 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
}
void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
+ // Ignore empty classes in C++.
+ if (getContext().getLangOptions().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ return;
+ }
+ }
+
const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
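With the early return above, zero-initializing an empty class becomes a no-op, e.g.:

    struct Empty {};        // sizeof(Empty) == 1, but it carries no data
    Empty e = Empty();      // value-initialization: no memset is emitted,
                            // since writing the padding byte is unobservable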
@@ -749,6 +750,11 @@ void CodeGenFunction::EmitCleanupBlock() {
return;
}
+ // Scrub debug location info.
+ for (llvm::BasicBlock::iterator LBI = Info.CleanupBlock->begin(),
+ LBE = Info.CleanupBlock->end(); LBI != LBE; ++LBI)
+ Builder.SetInstDebugLocation(LBI);
+
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
if (CurBB && !CurBB->getTerminator() &&
Info.CleanupBlock->getNumUses() == 0) {
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index f21350d..90a3ec4 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -32,6 +32,7 @@
namespace llvm {
class BasicBlock;
class LLVMContext;
+ class MDNode;
class Module;
class SwitchInst;
class Twine;
@@ -148,19 +149,17 @@ public:
/// block.
class EHCleanupBlock {
CodeGenFunction& CGF;
- llvm::BasicBlock *Cont;
+ llvm::BasicBlock *PreviousInsertionBlock;
llvm::BasicBlock *CleanupHandler;
- llvm::BasicBlock *CleanupEntryBB;
llvm::BasicBlock *PreviousInvokeDest;
public:
EHCleanupBlock(CodeGenFunction &cgf)
- : CGF(cgf), Cont(CGF.createBasicBlock("cont")),
- CleanupHandler(CGF.createBasicBlock("ehcleanup")),
- CleanupEntryBB(CGF.createBasicBlock("ehcleanup.rest")),
+ : CGF(cgf),
+ PreviousInsertionBlock(CGF.Builder.GetInsertBlock()),
+ CleanupHandler(CGF.createBasicBlock("ehcleanup", CGF.CurFn)),
PreviousInvokeDest(CGF.getInvokeDest()) {
- CGF.EmitBranch(Cont);
llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
- CGF.Builder.SetInsertPoint(CleanupEntryBB);
+ CGF.Builder.SetInsertPoint(CleanupHandler);
CGF.setInvokeDest(TerminateHandler);
}
~EHCleanupBlock();
@@ -186,7 +185,8 @@ public:
public:
DelayedCleanupBlock(CodeGenFunction &cgf, bool ehonly = false)
: CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()),
- CleanupEntryBB(CGF.createBasicBlock("cleanup")), CleanupExitBB(0),
+ CleanupEntryBB(CGF.createBasicBlock("cleanup")),
+ CleanupExitBB(0),
CurInvokeDest(CGF.getInvokeDest()),
EHOnly(ehonly) {
CGF.Builder.SetInsertPoint(CleanupEntryBB);
@@ -474,11 +474,15 @@ public:
/// GenerateObjCGetter - Synthesize an Objective-C property getter function.
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
+ void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD, bool ctor);
/// GenerateObjCSetter - Synthesize an Objective-C property setter function
/// for the given property.
void GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
+ bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
+ bool IvarTypeWithAggrGCObjects(QualType Ty);
//===--------------------------------------------------------------------===//
// Block Bits
@@ -532,14 +536,16 @@ public:
/// InitializeVTablePointer - Initialize the vtable pointer of the given
/// subobject.
///
- /// \param BaseIsMorallyVirtual - Whether the base subobject is a virtual base
- /// or a direct or indirect base of a virtual base.
- void InitializeVTablePointer(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ void InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass);
typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
- void InitializeVTablePointers(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ void InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ uint64_t OffsetFromNearestVBase,
bool BaseIsNonVirtualPrimaryBase,
llvm::Constant *VTable,
const CXXRecordDecl *VTableClass,
@@ -549,7 +555,6 @@ public:
void SynthesizeCXXCopyConstructor(const FunctionArgList &Args);
- void SynthesizeCXXCopyAssignment(const FunctionArgList &Args);
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes in
@@ -670,6 +675,9 @@ public:
llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
const llvm::Twine &Name = "tmp");
+ /// InitTempAlloca - Provide an initial value for the given alloca.
+ void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
+
/// CreateIRTemp - Create a temporary IR object of the given type, with
/// appropriate alignment. This routine should only be used when an temporary
/// value needs to be stored into an alloca (for example, to avoid explicit
@@ -704,6 +712,12 @@ public:
RValue EmitAnyExprToTemp(const Expr *E, bool IsAggLocVolatile = false,
bool IsInitializer = false);
+ /// EmitAnyExprToMem - Emits the code necessary to evaluate an
+ /// arbitrary expression into the given memory location.
+ void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
+ bool IsLocationVolatile = false,
+ bool IsInitializer = false);
+
/// EmitAggregateCopy - Emit an aggregate copy.
///
/// \param isVolatile - True iff either the source or the destination is
@@ -765,22 +779,23 @@ public:
}
/// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
- /// complete class down to one of its virtual bases.
- llvm::Value *GetAddressOfBaseOfCompleteClass(llvm::Value *Value,
- bool IsVirtual,
- const CXXRecordDecl *Derived,
- const CXXRecordDecl *Base);
-
+ /// complete class to the given direct base.
+ llvm::Value *
+ GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual);
+
/// GetAddressOfBaseClass - This function will add the necessary delta to the
/// load of 'this' and returns address of the base class.
- llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl,
+ llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXBaseSpecifierArray &BasePath,
bool NullCheckValue);
-
+
llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *DerivedClassDecl,
+ const CXXRecordDecl *Derived,
+ const CXXBaseSpecifierArray &BasePath,
bool NullCheckValue);
llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
@@ -789,31 +804,17 @@ public:
void EmitClassAggrMemberwiseCopy(llvm::Value *DestValue,
llvm::Value *SrcValue,
- const ArrayType *Array,
- const CXXRecordDecl *BaseClassDecl,
- QualType Ty);
-
- void EmitClassAggrCopyAssignment(llvm::Value *DestValue,
- llvm::Value *SrcValue,
- const ArrayType *Array,
- const CXXRecordDecl *BaseClassDecl,
- QualType Ty);
+ const ConstantArrayType *Array,
+ const CXXRecordDecl *ClassDecl);
void EmitClassMemberwiseCopy(llvm::Value *DestValue, llvm::Value *SrcValue,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl,
- QualType Ty);
-
- void EmitClassCopyAssignment(llvm::Value *DestValue, llvm::Value *SrcValue,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl,
- QualType Ty);
+ const CXXRecordDecl *ClassDecl);
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
const FunctionArgList &Args);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
- llvm::Value *This,
+ bool ForVirtualBase, llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd);
@@ -842,7 +843,7 @@ public:
llvm::Value *This);
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
- llvm::Value *This);
+ bool ForVirtualBase, llvm::Value *This);
void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
void PopCXXTemporary();
@@ -1033,6 +1034,7 @@ public:
// Note: only available for agg return types
LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+ LValue EmitCompoundAssignOperatorLValue(const CompoundAssignOperator *E);
// Note: only available for agg return types
LValue EmitCallExprLValue(const CallExpr *E);
// Note: only available for agg return types
@@ -1100,7 +1102,8 @@ public:
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
const CallArgList &Args,
- const Decl *TargetDecl = 0);
+ const Decl *TargetDecl = 0,
+ llvm::Instruction **callOrInvoke = 0);
RValue EmitCall(QualType FnType, llvm::Value *Callee,
ReturnValueSlot ReturnValue,
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index a2ad31e..cc90a28 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -35,6 +35,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
using namespace CodeGen;
@@ -47,7 +48,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
Types(C, M, TD, getTargetCodeGenInfo().getABIInfo()),
- MangleCtx(C), VTables(*this), Runtime(0), CFConstantStringClassRef(0),
+ MangleCtx(C, diags), VTables(*this), Runtime(0),
+ CFConstantStringClassRef(0),
+ NSConstantStringClassRef(0),
VMContext(M.getContext()) {
if (!Features.ObjC1)
@@ -78,7 +81,6 @@ void CodeGenModule::createObjCRuntime() {
}
void CodeGenModule::Release() {
- EmitFundamentalRTTIDescriptors();
EmitDeferred();
EmitCXXGlobalInitFunc();
EmitCXXGlobalDtorFunc();
@@ -313,7 +315,18 @@ GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
if (FD->getTemplateSpecializationKind()
== TSK_ExplicitInstantiationDeclaration)
return CodeGenModule::GVA_C99Inline;
-
+
+ // If this is a virtual method and its class has a key function in another
+ // translation unit, we know that this method will be present in that
+ // translation unit. In this translation unit it will be used only for
+ // inlining and analysis. This is the semantics of C99 inline.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *RD = MD->getParent();
+ if (MD->isVirtual() &&
+ CodeGenVTables::isKeyFunctionInAnotherTU(Context, RD))
+ return CodeGenModule::GVA_C99Inline;
+ }
+
return CodeGenModule::GVA_CXXInline;
}
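The case this covers, sketched: an inline virtual method in a class whose key function is defined elsewhere.

    // widget.h
    struct Widget {
      virtual void anchor();             // key function, defined in widget.cpp
      virtual int size() { return 4; }   // with the key function in another TU,
                                         // this body is also emitted there, so
                                         // locally it is only needed for
                                         // inlining and analysis.
    };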
@@ -491,11 +504,11 @@ void CodeGenModule::EmitDeferred() {
// previously unused static decl may become used during the generation of code
// for a static function, iterate until no changes are made.
- while (!DeferredDeclsToEmit.empty() || !DeferredVtables.empty()) {
- if (!DeferredVtables.empty()) {
- const CXXRecordDecl *RD = DeferredVtables.back();
- DeferredVtables.pop_back();
- getVTables().GenerateClassData(getVtableLinkage(RD), RD);
+ while (!DeferredDeclsToEmit.empty() || !DeferredVTables.empty()) {
+ if (!DeferredVTables.empty()) {
+ const CXXRecordDecl *RD = DeferredVTables.back();
+ DeferredVTables.pop_back();
+ getVTables().GenerateClassData(getVTableLinkage(RD), RD);
continue;
}
@@ -687,30 +700,30 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Defer code generation when possible if this is a static definition, inline
// function etc. These we only want to emit if they are used.
- if (MayDeferGeneration(Global)) {
- // If the value has already been used, add it directly to the
- // DeferredDeclsToEmit list.
- MangleBuffer MangledName;
- getMangledName(MangledName, GD);
- if (GetGlobalValue(MangledName))
- DeferredDeclsToEmit.push_back(GD);
- else {
- // Otherwise, remember that we saw a deferred decl with this name. The
- // first use of the mangled name will cause it to move into
- // DeferredDeclsToEmit.
- DeferredDecls[MangledName] = GD;
- }
+ if (!MayDeferGeneration(Global)) {
+ // Emit the definition if it can't be deferred.
+ EmitGlobalDefinition(GD);
return;
}
-
- // Otherwise emit the definition.
- EmitGlobalDefinition(GD);
+
+ // If the value has already been used, add it directly to the
+ // DeferredDeclsToEmit list.
+ MangleBuffer MangledName;
+ getMangledName(MangledName, GD);
+ if (GetGlobalValue(MangledName))
+ DeferredDeclsToEmit.push_back(GD);
+ else {
+ // Otherwise, remember that we saw a deferred decl with this name. The
+ // first use of the mangled name will cause it to move into
+ // DeferredDeclsToEmit.
+ DeferredDecls[MangledName] = GD;
+ }
}
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
- PrettyStackTraceDecl CrashInfo((ValueDecl *)D, D->getLocation(),
+ PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
Context.getSourceManager(),
"Generating code for declaration");
@@ -718,16 +731,18 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
getVTables().EmitVTableRelatedData(GD);
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
- EmitCXXConstructor(CD, GD.getCtorType());
- else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
- EmitCXXDestructor(DD, GD.getDtorType());
- else if (isa<FunctionDecl>(D))
- EmitGlobalFunctionDefinition(GD);
- else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
- EmitGlobalVarDefinition(VD);
- else {
- assert(0 && "Invalid argument to EmitGlobalDefinition()");
- }
+ return EmitCXXConstructor(CD, GD.getCtorType());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+ return EmitCXXDestructor(DD, GD.getDtorType());
+
+ if (isa<FunctionDecl>(D))
+ return EmitGlobalFunctionDefinition(GD);
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return EmitGlobalVarDefinition(VD);
+
+ assert(0 && "Invalid argument to EmitGlobalDefinition()");
}
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
@@ -764,12 +779,16 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
// type is an incomplete struct). Use a fake type instead, and make
// sure not to try to set attributes.
bool IsIncompleteFunction = false;
- if (!isa<llvm::FunctionType>(Ty)) {
- Ty = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- std::vector<const llvm::Type*>(), false);
+
+ const llvm::FunctionType *FTy;
+ if (isa<llvm::FunctionType>(Ty)) {
+ FTy = cast<llvm::FunctionType>(Ty);
+ } else {
+ FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ std::vector<const llvm::Type*>(), false);
IsIncompleteFunction = true;
}
- llvm::Function *F = llvm::Function::Create(cast<llvm::FunctionType>(Ty),
+ llvm::Function *F = llvm::Function::Create(FTy,
llvm::Function::ExternalLinkage,
MangledName, &getModule());
assert(F->getName() == MangledName && "name was uniqued!");
@@ -811,7 +830,14 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
}
}
- return F;
+ // Make sure the result is of the requested type.
+ if (!IsIncompleteFunction) {
+ assert(F->getType()->getElementType() == Ty);
+ return F;
+ }
+
+ const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(F, PTy);
}
/// GetAddrOfFunction - Return the address of the given function. If Ty is
@@ -959,7 +985,7 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
}
llvm::GlobalVariable::LinkageTypes
-CodeGenModule::getVtableLinkage(const CXXRecordDecl *RD) {
+CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
return llvm::GlobalVariable::InternalLinkage;
@@ -1204,9 +1230,10 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
UI != E; ) {
// TODO: Do invokes ever occur in C code? If so, we should handle them too.
- unsigned OpNo = UI.getOperandNo();
- llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*UI++);
- if (!CI || OpNo != 0) continue;
+ llvm::Value::use_iterator I = UI++; // Increment before the CI is erased.
+ llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*I);
+ llvm::CallSite CS(CI);
+ if (!CI || !CS.isCallee(I)) continue;
// If the return types don't match exactly, and if the call isn't dead, then
// we can't transform this call.
@@ -1220,8 +1247,8 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
bool DontTransform = false;
for (llvm::Function::arg_iterator AI = NewFn->arg_begin(),
E = NewFn->arg_end(); AI != E; ++AI, ++ArgNo) {
- if (CI->getNumOperands()-1 == ArgNo ||
- CI->getOperand(ArgNo+1)->getType() != AI->getType()) {
+ if (CS.arg_size() == ArgNo ||
+ CS.getArgument(ArgNo)->getType() != AI->getType()) {
DontTransform = true;
break;
}
@@ -1231,7 +1258,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
- ArgList.append(CI->op_begin()+1, CI->op_begin()+1+ArgNo);
+ ArgList.append(CS.arg_begin(), CS.arg_begin() + ArgNo);
llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(),
ArgList.end(), "", CI);
ArgList.clear();
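llvm::CallSite presents CallInst and InvokeInst through one interface, which is what lets the loop above drop the raw operand arithmetic (callee at operand 0, arguments from 1). A small sketch using only the accessors that appear in this hunk:

    #include "llvm/Function.h"
    #include "llvm/Support/CallSite.h"

    // Count the uses of F that are direct calls to F, as opposed to uses of
    // F as a call argument; mirrors the isCallee() test above.
    static unsigned countDirectCalls(llvm::Function *F) {
      unsigned N = 0;
      for (llvm::Value::use_iterator UI = F->use_begin(), E = F->use_end();
           UI != E; ++UI) {
        if (llvm::CallInst *CI = llvm::dyn_cast<llvm::CallInst>(*UI)) {
          llvm::CallSite CS(CI);
          if (CS.isCallee(UI))   // this use occupies the callee slot
            ++N;
        }
      }
      return N;
    }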
@@ -1578,6 +1605,90 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
return GV;
}
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ bool isUTF16 = false;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ getTargetData().isLittleEndian(),
+ isUTF16, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero =
+ llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get _NSConstantStringClassReference.
+ if (!NSConstantStringClassRef) {
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ Features.ObjCNonFragileABI ?
+ "OBJC_CLASS_$_NSConstantString" :
+ "_NSConstantStringClassReference");
+ // Decay array -> ptr
+ NSConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ }
+
+ QualType NSTy = getContext().getNSConstantStringType();
+
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(NSTy));
+
+ std::vector<llvm::Constant*> Fields(3);
+
+ // Class pointer.
+ Fields[0] = NSConstantStringClassRef;
+
+ // String pointer.
+ llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ bool isConstant;
+ if (isUTF16) {
+ // FIXME: why do UTF-16 strings get "_" labels instead of "L" labels?
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ // Note: -fwritable-strings doesn't make Unicode NSStrings writable, but
+ // does make plain ASCII ones writable.
+ isConstant = true;
+ } else {
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !Features.WritableStrings;
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
+ ".str");
+ if (isUTF16) {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
+ GV->setAlignment(Align.getQuantity());
+ }
+ Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+
+ // String length.
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_nsstring_");
+ // FIXME: Fix the section.
+ if (const char *Sect =
+ Features.ObjCNonFragileABI
+ ? getContext().Target.getNSStringNonFragileABISection()
+ : getContext().Target.getNSStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
+
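The constant built here has the familiar three-field layout; an illustrative C-level picture of what @"hello" lowers to (field types approximate):

    struct __NSConstantString {
      const int *isa;      // _NSConstantStringClassReference, or
                           // OBJC_CLASS_$_NSConstantString (nonfragile ABI)
      const char *str;     // pointer to the ".str" global holding "hello"
      unsigned length;     // 5
    };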
/// GetStringForStringLiteral - Return the appropriate bytes for a
/// string literal, properly padded to match the literal type.
std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
@@ -1709,6 +1820,39 @@ void CodeGenModule::EmitObjCPropertyImplementations(const
}
}
+/// EmitObjCIvarInitializations - Emit information for ivar initialization
+/// for an implementation.
+void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
+ if (!Features.NeXTRuntime || D->getNumIvarInitializers() == 0)
+ return;
+ DeclContext* DC = const_cast<DeclContext*>(dyn_cast<DeclContext>(D));
+ assert(DC && "EmitObjCIvarInitializations - null DeclContext");
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(), cxxSelector,
+ getContext().VoidTy, 0,
+ DC, true, false, true,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(DTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+
+ II = &getContext().Idents.get(".cxx_construct");
+ cxxSelector = getContext().Selectors.getSelector(0, &II);
+ // The constructor returns 'self'.
+ ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(), cxxSelector,
+ getContext().getObjCIdType(), 0,
+ DC, true, false, true,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(CTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+}
+
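The synthesized .cxx_construct and .cxx_destruct methods are what run C++ constructors and destructors for ivars. An Objective-C++ sketch of the input that needs them (class name hypothetical):

    #include <string>
    @interface Wrapper : NSObject {
      std::string name;   // non-trivial ctor/dtor: the NeXT runtime calls the
                          // synthesized .cxx_construct after alloc and
                          // .cxx_destruct during dealloc to manage this ivar
    }
    @end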
/// EmitNamespace - Emit all declarations in a namespace.
void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
for (RecordDecl::decl_iterator I = ND->decls_begin(), E = ND->decls_end();
@@ -1805,6 +1949,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::ObjCImplementation: {
ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
EmitObjCPropertyImplementations(OMD);
+ EmitObjCIvarInitializations(OMD);
Runtime->GenerateClass(OMD);
break;
}
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index e9f78bc..93d8ddf 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -21,7 +21,7 @@
#include "CGBlocks.h"
#include "CGCall.h"
#include "CGCXX.h"
-#include "CGVtable.h"
+#include "CGVTables.h"
#include "CodeGenTypes.h"
#include "GlobalDecl.h"
#include "Mangle.h"
@@ -94,7 +94,8 @@ class CodeGenModule : public BlockModule {
/// VTables - Holds information about C++ vtables.
CodeGenVTables VTables;
-
+ friend class CodeGenVTables;
+
CGObjCRuntime* Runtime;
CGDebugInfo* DebugInfo;
@@ -132,6 +133,7 @@ class CodeGenModule : public BlockModule {
llvm::StringMap<llvm::Constant*> CFConstantStringMap;
llvm::StringMap<llvm::Constant*> ConstantStringMap;
+ llvm::DenseMap<const Decl*, llvm::Value*> StaticLocalDeclMap;
/// CXXGlobalInits - Global variables with initializers that need to run
/// before main.
@@ -145,6 +147,10 @@ class CodeGenModule : public BlockModule {
/// strings. This value has type int * but is actually an Obj-C class pointer.
llvm::Constant *CFConstantStringClassRef;
+ /// NSConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *NSConstantStringClassRef;
+
/// Lazily create the Objective-C runtime
void createObjCRuntime();
@@ -169,6 +175,14 @@ public:
/// been configured.
bool hasObjCRuntime() { return !!Runtime; }
+ llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
+ return StaticLocalDeclMap[VD];
+ }
+ void setStaticLocalDeclAddress(const VarDecl *D,
+ llvm::GlobalVariable *GV) {
+ StaticLocalDeclMap[D] = GV;
+ }
+
CGDebugInfo *getDebugInfo() { return DebugInfo; }
ASTContext &getContext() const { return Context; }
const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
@@ -218,7 +232,7 @@ public:
/// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
/// for the given type.
- llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty);
+ llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
/// GetAddrOfThunk - Get the address of the thunk for the given global decl.
llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
@@ -227,11 +241,11 @@ public:
llvm::Constant *GetWeakRefReference(const ValueDecl *VD);
/// GetNonVirtualBaseClassOffset - Returns the offset from a derived class to
- /// its base class. Returns null if the offset is 0.
+ /// a class. Returns null if the offset is 0.
llvm::Constant *
GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl);
-
+ const CXXBaseSpecifierArray &BasePath);
+
/// GetStringForStringLiteral - Return the appropriate bytes for a string
/// literal, properly padded to match the literal type. If only the address of
/// a constant is needed consider using GetAddrOfConstantStringLiteral.
@@ -240,6 +254,10 @@ public:
/// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
/// for the given string.
llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantNSString - Return a pointer to a constant NSString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantNSString(const StringLiteral *Literal);
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
/// for the given string literal.
@@ -416,16 +434,16 @@ public:
llvm::GlobalVariable::LinkageTypes
getFunctionLinkage(const FunctionDecl *FD);
- /// getVtableLinkage - Return the appropriate linkage for the vtable, VTT,
+ /// getVTableLinkage - Return the appropriate linkage for the vtable, VTT,
/// and type information of the given class.
static llvm::GlobalVariable::LinkageTypes
- getVtableLinkage(const CXXRecordDecl *RD);
+ getVTableLinkage(const CXXRecordDecl *RD);
/// GetTargetTypeStoreSize - Return the store size, in character units, of
/// the given LLVM type.
CharUnits GetTargetTypeStoreSize(const llvm::Type *Ty) const;
- std::vector<const CXXRecordDecl*> DeferredVtables;
+ std::vector<const CXXRecordDecl*> DeferredVTables;
private:
llvm::GlobalValue *GetGlobalValue(llvm::StringRef Ref);
@@ -464,6 +482,7 @@ private:
void EmitGlobalVarDefinition(const VarDecl *D);
void EmitAliasDefinition(GlobalDecl GD);
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+ void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
// C++ related functions.
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 9b74106d..10e71e2 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -52,7 +52,6 @@ namespace clang {
namespace CodeGen {
class CGRecordLayout;
- class CodeGenTypes;
/// CodeGenTypes - This class organizes the cross-module state that is used
/// while lowering AST types to LLVM types.
@@ -124,10 +123,10 @@ public:
const llvm::FunctionType *GetFunctionType(GlobalDecl GD);
- /// GetFunctionTypeForVtable - Get the LLVM function type for use in a vtable,
+ /// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
- const llvm::Type *GetFunctionTypeForVtable(const CXXMethodDecl *MD);
+ const llvm::Type *GetFunctionTypeForVTable(const CXXMethodDecl *MD);
const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const;
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index 077db7c..8658cfb 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -25,7 +25,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ErrorHandling.h"
-#include "CGVtable.h"
+#include "CGVTables.h"
#define MANGLE_CHECKER 0
@@ -111,6 +111,7 @@ public:
private:
bool mangleSubstitution(const NamedDecl *ND);
bool mangleSubstitution(QualType T);
+ bool mangleSubstitution(TemplateName Template);
bool mangleSubstitution(uintptr_t Ptr);
bool mangleStandardSubstitution(const NamedDecl *ND);
@@ -121,6 +122,7 @@ private:
addSubstitution(reinterpret_cast<uintptr_t>(ND));
}
void addSubstitution(QualType T);
+ void addSubstitution(TemplateName Template);
void addSubstitution(uintptr_t Ptr);
void mangleUnresolvedScope(NestedNameSpecifier *Qualifier);
@@ -138,6 +140,7 @@ private:
unsigned KnownArity);
void mangleUnscopedName(const NamedDecl *ND);
void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void mangleUnscopedTemplateName(TemplateName);
void mangleSourceName(const IdentifierInfo *II);
void mangleLocalName(const NamedDecl *ND);
void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
@@ -147,6 +150,7 @@ private:
unsigned NumTemplateArgs);
void manglePrefix(const DeclContext *DC, bool NoFunction=false);
void mangleTemplatePrefix(const TemplateDecl *ND);
+ void mangleTemplatePrefix(TemplateName Template);
void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
void mangleQualifiers(Qualifiers Quals);
@@ -172,6 +176,9 @@ private:
void mangleCXXCtorType(CXXCtorType T);
void mangleCXXDtorType(CXXDtorType T);
+ void mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
void mangleTemplateArgs(const TemplateParameterList &PL,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
@@ -306,8 +313,6 @@ static bool isStd(const NamespaceDecl *NS) {
static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
while (isa<LinkageSpecDecl>(DC)) {
- assert(cast<LinkageSpecDecl>(DC)->getLanguage() ==
- LinkageSpecDecl::lang_cxx && "Unexpected linkage decl!");
DC = DC->getParent();
}
@@ -429,6 +434,31 @@ void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
addSubstitution(ND);
}
+void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleUnscopedTemplateName(TD);
+
+ if (mangleSubstitution(Template))
+ return;
+
+ // FIXME: How to cope with operators here?
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Not a dependent template name?");
+ if (!Dependent->isIdentifier()) {
+ // FIXME: We can't possibly know the arity of the operator here!
+ Diagnostic &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot mangle dependent operator name");
+ Diags.Report(FullSourceLoc(), DiagID);
+ return;
+ }
+
+ mangleSourceName(Dependent->getIdentifier());
+ addSubstitution(Template);
+}
+
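The dependent path only arises inside templates, where there is no TemplateDecl to name. For illustration:

    template <typename T>
    struct Use {
      // T::Inner is a DependentTemplateName: it has an identifier ("Inner")
      // but no declaration, so only the <source-name> can be mangled.
      typename T::template Inner<int> member;
    };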
void CXXNameMangler::mangleNumber(int64_t Number) {
// <number> ::= [n] <non-negative decimal integer>
if (Number < 0) {
@@ -475,11 +505,12 @@ void CXXNameMangler::mangleUnresolvedScope(NestedNameSpecifier *Qualifier) {
if (const TemplateSpecializationType *TST =
dyn_cast<TemplateSpecializationType>(QTy)) {
if (!mangleSubstitution(QualType(TST, 0))) {
- TemplateDecl *TD = TST->getTemplateName().getAsTemplateDecl();
- assert(TD && "FIXME: Support dependent template names");
- mangleTemplatePrefix(TD);
- TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
- mangleTemplateArgs(*TemplateParameters, TST->getArgs(),
+ mangleTemplatePrefix(TST->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
TST->getNumArgs());
addSubstitution(QualType(TST, 0));
}
@@ -741,6 +772,29 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
addSubstitution(cast<NamedDecl>(DC));
}
+void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplatePrefix(TD);
+
+ if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
+ mangleUnresolvedScope(Qualified->getQualifier());
+
+ if (OverloadedTemplateStorage *Overloaded
+ = Template.getAsOverloadedTemplate()) {
+ mangleUnqualifiedName(0, (*Overloaded->begin())->getDeclName(),
+ UnknownArity);
+ return;
+ }
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Unknown template name kind?");
+ mangleUnresolvedScope(Dependent->getQualifier());
+ mangleUnscopedTemplateName(Template);
+}
+
void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
// <template-prefix> ::= <prefix> <template unqualified-name>
// ::= <template-param>
@@ -896,7 +950,7 @@ void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
assert (CD && "Missing container decl in GetNameForMethod");
OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
- OS << '(' << CID->getNameAsString() << ')';
+ OS << '(' << CID << ')';
OS << ' ' << MD->getSelector().getAsString() << ']';
Out << OS.str().size() << OS.str();
@@ -1157,18 +1211,50 @@ void CXXNameMangler::mangleType(const BlockPointerType *T) {
mangleType(T->getPointeeType());
}
-void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
- TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl();
- assert(TD && "FIXME: Support dependent template names!");
+void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ // Mangle injected class name types as if the user had written the
+ // specialization out fully. It may not actually be possible to see
+ // this mangling, though.
+ mangleType(T->getInjectedSpecializationType());
+}
- mangleName(TD, T->getArgs(), T->getNumArgs());
+void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
+ mangleName(TD, T->getArgs(), T->getNumArgs());
+ } else {
+ if (mangleSubstitution(QualType(T, 0)))
+ return;
+
+ mangleTemplatePrefix(T->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
+ addSubstitution(QualType(T, 0));
+ }
}
void CXXNameMangler::mangleType(const DependentNameType *T) {
// Typename types are always nested
Out << 'N';
- mangleUnresolvedScope(T->getQualifier());
- mangleSourceName(T->getIdentifier());
+ if (T->getIdentifier()) {
+ mangleUnresolvedScope(T->getQualifier());
+ mangleSourceName(T->getIdentifier());
+ } else {
+ const TemplateSpecializationType *TST = T->getTemplateId();
+ if (!mangleSubstitution(QualType(TST, 0))) {
+ mangleTemplatePrefix(TST->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
+ TST->getNumArgs());
+ addSubstitution(QualType(TST, 0));
+ }
+ }
+
Out << 'E';
}
@@ -1279,17 +1365,33 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
// ::= L <type <value float> E # floating literal
// ::= L <mangled-name> E # external name
switch (E->getStmtClass()) {
- default:
+ case Expr::NoStmtClass:
+#define EXPR(Type, Base)
+#define STMT(Type, Base) \
+ case Expr::Type##Class:
+#include "clang/AST/StmtNodes.def"
llvm_unreachable("unexpected statement kind");
break;
+ default: {
+ // As bad as this diagnostic is, it's better than crashing.
+ Diagnostic &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot yet mangle expression type %0");
+ Diags.Report(FullSourceLoc(E->getExprLoc(),
+ getASTContext().getSourceManager()),
+ DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
case Expr::CallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
Out << "cl";
mangleCalledExpression(CE->getCallee(), CE->getNumArgs());
for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I)
mangleExpression(CE->getArg(I));
- Out << "E";
+ Out << 'E';
break;
}
@@ -1333,9 +1435,9 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
Out << "cv";
mangleType(CE->getType());
- if (N != 1) Out << "_";
+ if (N != 1) Out << '_';
for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
- if (N != 1) Out << "E";
+ if (N != 1) Out << 'E';
break;
}
@@ -1346,21 +1448,21 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
Out << "cv";
mangleType(CE->getType());
- if (N != 1) Out << "_";
+ if (N != 1) Out << '_';
for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
- if (N != 1) Out << "E";
+ if (N != 1) Out << 'E';
break;
}
case Expr::SizeOfAlignOfExprClass: {
const SizeOfAlignOfExpr *SAE = cast<SizeOfAlignOfExpr>(E);
- if (SAE->isSizeOf()) Out << "s";
- else Out << "a";
+ if (SAE->isSizeOf()) Out << 's';
+ else Out << 'a';
if (SAE->isArgumentType()) {
- Out << "t";
+ Out << 't';
mangleType(SAE->getArgumentType());
} else {
- Out << "z";
+ Out << 'z';
mangleExpression(SAE->getArgumentExpr());
}
break;
@@ -1474,9 +1576,21 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXBindReferenceExprClass:
+ mangleExpression(cast<CXXBindReferenceExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::CXXBindTemporaryExprClass:
+ mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::CXXExprWithTemporariesClass:
+ mangleExpression(cast<CXXExprWithTemporaries>(E)->getSubExpr());
+ break;
+
case Expr::FloatingLiteralClass: {
const FloatingLiteral *FL = cast<FloatingLiteral>(E);
- Out << "L";
+ Out << 'L';
mangleType(FL->getType());
// TODO: avoid this copy with careful stream management.
@@ -1484,10 +1598,23 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
FL->getValue().bitcastToAPInt().toString(Buffer, 16, false);
Out.write(Buffer.data(), Buffer.size());
- Out << "E";
+ Out << 'E';
break;
}
+ case Expr::CharacterLiteralClass:
+ Out << 'L';
+ mangleType(E->getType());
+ Out << cast<CharacterLiteral>(E)->getValue();
+ Out << 'E';
+ break;
+
+ case Expr::CXXBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
case Expr::IntegerLiteralClass:
mangleIntegerLiteral(E->getType(),
llvm::APSInt(cast<IntegerLiteral>(E)->getValue()));
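Illustrative manglings produced by the literal cases above (Itanium <expr-primary>):

    true        ->  Lb1E       // CXXBoolLiteralExpr
    false       ->  Lb0E
    'A' (char)  ->  Lc65E      // CharacterLiteral: type code 'c', decimal value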
@@ -1535,23 +1662,37 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
}
}
+void CXXNameMangler::mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplateArgs(*TD->getTemplateParameters(), TemplateArgs,
+ NumTemplateArgs);
+
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(0, TemplateArgs[i]);
+ Out << 'E';
+}
+
void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
const TemplateArgumentList &AL) {
// <template-args> ::= I <template-arg>+ E
- Out << "I";
+ Out << 'I';
for (unsigned i = 0, e = AL.size(); i != e; ++i)
mangleTemplateArg(PL.getParam(i), AL[i]);
- Out << "E";
+ Out << 'E';
}
void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs) {
// <template-args> ::= I <template-arg>+ E
- Out << "I";
+ Out << 'I';
for (unsigned i = 0; i != NumTemplateArgs; ++i)
mangleTemplateArg(PL.getParam(i), TemplateArgs[i]);
- Out << "E";
+ Out << 'E';
}
void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
@@ -1569,7 +1710,7 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
break;
case TemplateArgument::Template:
assert(A.getAsTemplate().getAsTemplateDecl() &&
- "FIXME: Support dependent template names");
+ "Can't get dependent template names here");
mangleName(A.getAsTemplate().getAsTemplateDecl());
break;
case TemplateArgument::Expression:
@@ -1581,6 +1722,7 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
break;
case TemplateArgument::Declaration: {
+ assert(P && "Missing template parameter for declaration argument");
// <expr-primary> ::= L <mangled-name> E # external name
// Clang produces AST's where pointer-to-member-function expressions
@@ -1646,9 +1788,17 @@ bool CXXNameMangler::mangleSubstitution(QualType T) {
return mangleSubstitution(TypePtr);
}
+bool CXXNameMangler::mangleSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ return mangleSubstitution(
+ reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
- llvm::DenseMap<uintptr_t, unsigned>::iterator I =
- Substitutions.find(Ptr);
+ llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr);
if (I == Substitutions.end())
return false;
@@ -1660,9 +1810,8 @@ bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
// <seq-id> is encoded in base-36, using digits and upper case letters.
char Buffer[10];
- char *BufferPtr = Buffer + 9;
+ char *BufferPtr = llvm::array_endof(Buffer);
- *BufferPtr = 0;
if (SeqID == 0) *--BufferPtr = '0';
while (SeqID) {
@@ -1674,7 +1823,9 @@ bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
SeqID /= 36;
}
- Out << 'S' << BufferPtr << '_';
+ Out << 'S'
+ << llvm::StringRef(BufferPtr, llvm::array_endof(Buffer)-BufferPtr)
+ << '_';
}
return true;
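For reference, the resulting back-references count in base 36 (digits, then upper-case letters), with the first candidate written without a seq-id:

    S_      1st substitution
    S0_     2nd
    S9_     11th
    SA_     12th
    SZ_     37th
    S10_    38th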
@@ -1824,6 +1975,14 @@ void CXXNameMangler::addSubstitution(QualType T) {
addSubstitution(TypePtr);
}
+void CXXNameMangler::addSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return addSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ addSubstitution(reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
unsigned SeqID = Substitutions.size();
@@ -1925,7 +2084,7 @@ void MangleContext::mangleGuardVariable(const VarDecl *D,
Mangler.mangleName(D);
}
-void MangleContext::mangleCXXVtable(const CXXRecordDecl *RD,
+void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
llvm::SmallVectorImpl<char> &Res) {
// <special-name> ::= TV <type> # virtual table
CXXNameMangler Mangler(*this, Res);
@@ -1941,7 +2100,7 @@ void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
Mangler.mangleName(RD);
}
-void MangleContext::mangleCXXCtorVtable(const CXXRecordDecl *RD, int64_t Offset,
+void MangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
const CXXRecordDecl *Type,
llvm::SmallVectorImpl<char> &Res) {
// <special-name> ::= TC <type> <offset number> _ <base type>
@@ -1949,7 +2108,7 @@ void MangleContext::mangleCXXCtorVtable(const CXXRecordDecl *RD, int64_t Offset,
Mangler.getStream() << "_ZTC";
Mangler.mangleName(RD);
Mangler.getStream() << Offset;
- Mangler.getStream() << "_";
+ Mangler.getStream() << '_';
Mangler.mangleName(Type);
}
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
index 91a5e97..da3626f 100644
--- a/lib/CodeGen/Mangle.h
+++ b/lib/CodeGen/Mangle.h
@@ -68,17 +68,21 @@ private:
/// calls to the C++ name mangler.
class MangleContext {
ASTContext &Context;
+ Diagnostic &Diags;
llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
unsigned Discriminator;
llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
public:
- explicit MangleContext(ASTContext &Context)
- : Context(Context) { }
+ explicit MangleContext(ASTContext &Context,
+ Diagnostic &Diags)
+ : Context(Context), Diags(Diags) { }
ASTContext &getASTContext() const { return Context; }
+ Diagnostic &getDiags() const { return Diags; }
+
uint64_t getAnonymousStructId(const TagDecl *TD) {
std::pair<llvm::DenseMap<const TagDecl *,
uint64_t>::iterator, bool> Result =
@@ -99,9 +103,9 @@ public:
const ThisAdjustment &ThisAdjustment,
llvm::SmallVectorImpl<char> &);
void mangleGuardVariable(const VarDecl *D, llvm::SmallVectorImpl<char> &);
- void mangleCXXVtable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
+ void mangleCXXVTable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
void mangleCXXVTT(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
- void mangleCXXCtorVtable(const CXXRecordDecl *RD, int64_t Offset,
+ void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
const CXXRecordDecl *Type,
llvm::SmallVectorImpl<char> &);
void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 59e8e77..e1fdf86 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -43,7 +43,8 @@ void ABIArgInfo::dump() const {
getCoerceToType()->print(OS);
break;
case Indirect:
- OS << "Indirect Align=" << getIndirectAlign();
+ OS << "Indirect Align=" << getIndirectAlign()
+ << " Byal=" << getIndirectByVal();
break;
case Expand:
OS << "Expand";
@@ -270,7 +271,7 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
llvm::LLVMContext &VMContext) const {
if (CodeGenFunction::hasAggregateLLVMType(Ty))
return ABIArgInfo::getIndirect(0);
-
+
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -291,8 +292,10 @@ class X86_32ABIInfo : public ABIInfo {
static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
- static unsigned getIndirectArgumentAlignment(QualType Ty,
- ASTContext &Context);
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context,
+ bool ByVal = true) const;
public:
ABIArgInfo classifyReturnType(QualType RetTy,
@@ -490,14 +493,19 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
}
}
-unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
- ASTContext &Context) {
- unsigned Align = Context.getTypeAlign(Ty);
- if (Align < 128) return 0;
- if (const RecordType* RT = Ty->getAs<RecordType>())
- if (typeContainsSSEVector(RT->getDecl(), Context))
- return 16;
- return 0;
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty,
+ ASTContext &Context,
+ bool ByVal) const {
+ if (!ByVal)
+ return ABIArgInfo::getIndirect(0, false);
+
+ // Compute the byval alignment. We trust the back-end to honor the
+ // minimum ABI alignment for byval, to make cleaner IR.
+ const unsigned MinABIAlign = 4;
+ unsigned Align = Context.getTypeAlign(Ty) / 8;
+ if (Align > MinABIAlign)
+ return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(0);
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
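
A worked instance of the new byval computation (types hypothetical, and ignoring how they came to be classified indirect): getTypeAlign() returns bits, so an aggregate with 16-byte alignment gives Align = 128/8 = 16, which exceeds MinABIAlign = 4 and gets an explicit byval alignment, while an ordinary struct falls through to getIndirect(0) and inherits the backend default:

    struct Big   { int x; } __attribute__((aligned(16)));  // -> getIndirect(16)
    struct Plain { int a, b; };                            // -> getIndirect(0)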
@@ -510,11 +518,10 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (hasNonTrivialDestructorOrCopyConstructor(RT))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
-
+ return getIndirectResult(Ty, Context, /*ByVal=*/false);
+
if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
- Context));
+ return getIndirectResult(Ty, Context);
}
// Ignore empty structs.
@@ -529,7 +536,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
canExpandIndirectArgument(Ty, Context))
return ABIArgInfo::getExpand();
- return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
+ return getIndirectResult(Ty, Context);
} else {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
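
The non-trivial case now funnels through getIndirectResult with ByVal=false, which by the definition above collapses to getIndirect(0, false): the argument travels as a pointer to a caller-owned temporary instead of a byval copy, preserving C++ copy-constructor and destructor semantics. A hypothetical type taking this path:

    struct Tracked {
      Tracked(const Tracked &);  // non-trivial copy constructor
      ~Tracked();                // non-trivial destructor
      int id;
    };
    void use(Tracked t);         // lowered to pass a pointer, never byval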
@@ -685,9 +692,12 @@ class X86_64ABIInfo : public ABIInfo {
ASTContext &Context) const;
/// getIndirectReturnResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be returned in memory.
+ ABIArgInfo getIndirectReturnResult(QualType Ty, ASTContext &Context) const;
+
+  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty,
- ASTContext &Context) const;
+ ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context) const;
ABIArgInfo classifyReturnType(QualType RetTy,
ASTContext &Context,
@@ -1060,6 +1070,22 @@ ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
return ABIArgInfo::getCoerce(CoerceTo);
}
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty,
+ ASTContext &Context) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
ASTContext &Context) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
@@ -1073,10 +1099,16 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
- bool ByVal = !isRecordWithNonTrivialDestructorOrCopyConstructor(Ty);
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
- // FIXME: Set alignment correctly.
- return ABIArgInfo::getIndirect(0, ByVal);
+ // Compute the byval alignment. We trust the back-end to honor the
+ // minimum ABI alignment for byval, to make cleaner IR.
+ const unsigned MinABIAlign = 8;
+ unsigned Align = Context.getTypeAlign(Ty) / 8;
+ if (Align > MinABIAlign)
+ return ABIArgInfo::getIndirect(Align);
+ return ABIArgInfo::getIndirect(0);
}
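
This is the same byval-alignment scheme as on x86-32, with the x86-64 minimum of 8 bytes. Hypothetical examples (both exceed 16 bytes, so they are classified Memory and reach this code):

    struct Avx   { double d[4]; } __attribute__((aligned(32)));  // -> getIndirect(32)
    struct Plain { long a, b, c; };                              // -> getIndirect(0)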
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
@@ -1104,7 +1136,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
// AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
// hidden argument.
case Memory:
- return getIndirectResult(RetTy, Context);
+ return getIndirectReturnResult(RetTy, Context);
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
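
Memory-class returns now go through the dedicated helper; the effect is that the aggregate comes back through a hidden pointer argument (LLVM's sret convention). A hypothetical case:

    struct S { double d[4]; };   // 32 bytes: class MEMORY on x86-64
    S make();                    // lowered with a hidden sret pointer parameter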
@@ -1126,7 +1158,8 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
// %st1.
case ComplexX87:
assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext),
+ ResType = llvm::StructType::get(VMContext,
+ llvm::Type::getX86_FP80Ty(VMContext),
llvm::Type::getX86_FP80Ty(VMContext),
NULL);
break;
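
The reflowed case above implements the AMD64 ABI's COMPLEX_X87 rule: the real part is returned in %st0 and the imaginary part in %st1, which CodeGen models as a two-element x86_fp80 struct. The C-level type this corresponds to:

    _Complex long double f();    // returned as { x86_fp80, x86_fp80 }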
@@ -1574,7 +1607,7 @@ ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
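
The replacement is purely a spelling change: getInt8PtrTy is the dedicated accessor for i8* and builds the same type the PointerType::getUnqual form did. Because LLVM types are uniqued per context, the two spellings yield the identical object (sketch, with Ctx standing in for any LLVMContext):

    #include "llvm/DerivedTypes.h"
    #include <cassert>

    void check(llvm::LLVMContext &Ctx) {
      const llvm::Type *A =
          llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(Ctx));
      const llvm::Type *B = llvm::Type::getInt8PtrTy(Ctx);
      assert(A == B && "types are uniqued, so both spellings agree");
    }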