Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CGClass.cpp       |   8
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp   |  35
-rw-r--r--  lib/CodeGen/CGDecl.cpp        | 126
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp     |   4
-rw-r--r--  lib/CodeGen/CGRTTI.cpp        |   2
-rw-r--r--  lib/CodeGen/CGVtable.cpp      | 479
-rw-r--r--  lib/CodeGen/CGVtable.h        |  20
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp |  23
-rw-r--r--  lib/CodeGen/CodeGenModule.h   |   3
-rw-r--r--  lib/CodeGen/Makefile          |   4
-rw-r--r--  lib/CodeGen/Mangle.cpp        | 173
-rw-r--r--  lib/CodeGen/TargetInfo.cpp    |  94
12 files changed, 603 insertions, 368 deletions
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 99c6dfd..525e858 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -98,7 +98,7 @@ CodeGenModule::ComputeThunkAdjustment(const CXXRecordDecl *ClassDecl,
}
if (VBase)
VirtualOffset =
- getVtableInfo().getVirtualBaseOffsetIndex(ClassDecl, BaseClassDecl);
+ getVtableInfo().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
uint64_t Offset =
ComputeNonVirtualBaseClassOffset(getContext(), Paths.front(), Start);
@@ -1540,11 +1540,11 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
Int8PtrTy->getPointerTo());
VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
- int64_t VBaseOffsetIndex =
- CGM.getVtableInfo().getVirtualBaseOffsetIndex(ClassDecl, BaseClassDecl);
+ int64_t VBaseOffsetOffset =
+ CGM.getVtableInfo().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
llvm::Value *VBaseOffsetPtr =
- Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetIndex, "vbase.offset.ptr");
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset, "vbase.offset.ptr");
const llvm::Type *PtrDiffTy =
ConvertType(getContext().getPointerDiffType());
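
The rename from getVirtualBaseOffsetIndex to getVirtualBaseOffsetOffset matches what the value actually is: a byte offset relative to the vtable address point (it is applied with CreateConstGEP1_64 on the i8* vtable pointer), not a slot index. A minimal C++ sketch (type and function names are hypothetical) of the situation this code path handles:

    // Converting a Derived* to a pointer to its virtual base requires loading
    // the vbase offset stored at a negative byte offset from the vtable
    // address point, which is what GetVirtualBaseClassOffset emits.
    struct Base    { virtual ~Base() {} int b; };
    struct Derived : virtual Base     { int d; };

    Base *toBase(Derived *D) {
      // Codegen loads D's vtable pointer, reads the vbase offset at
      // vtable + VBaseOffsetOffset (a negative byte count), and adds it to D.
      return D;
    }
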
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index c3302e6..60aa4e7 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -104,7 +104,10 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
void CGDebugInfo::CreateCompileUnit() {
// Get absolute path name.
- llvm::sys::Path AbsFileName(CGM.getCodeGenOpts().MainFileName);
+ std::string MainFileName = CGM.getCodeGenOpts().MainFileName;
+ if (MainFileName.empty())
+ MainFileName = "<unknown>";
+ llvm::sys::Path AbsFileName(MainFileName);
AbsFileName.makeAbsolute();
unsigned LangTag;
@@ -649,9 +652,9 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
cast<CXXRecordDecl>(BI->getType()->getAs<RecordType>()->getDecl());
if (BI->isVirtual()) {
- // virtual base offset index is -ve. The code generator emits dwarf
+ // virtual base offset offset is -ve. The code generator emits dwarf
// expression where it expects +ve number.
- BaseOffset = 0 - CGM.getVtableInfo().getVirtualBaseOffsetIndex(RD, Base);
+ BaseOffset = 0 - CGM.getVtableInfo().getVirtualBaseOffsetOffset(RD, Base);
BFlags = llvm::DIType::FlagVirtual;
} else
BaseOffset = RL.getBaseClassOffset(Base);
@@ -774,9 +777,8 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
// A RD->getName() is not unique. However, the debug info descriptors
// are uniqued so use the type name to ensure uniqueness.
- llvm::SmallString<256> FwdDeclName;
- FwdDeclName.resize(256);
- sprintf(&FwdDeclName[0], "fwd.type.%d", FwdDeclCount++);
+ llvm::SmallString<128> FwdDeclName;
+ llvm::raw_svector_ostream(FwdDeclName) << "fwd.type." << FwdDeclCount++;
llvm::DIDescriptor FDContext =
getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
llvm::DICompositeType FwdDecl =
@@ -792,6 +794,9 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
// Otherwise, insert it into the TypeCache so that recursive uses will find
// it.
TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
+ // Push the struct on region stack.
+ RegionStack.push_back(FwdDecl.getNode());
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl.getNode());
// Convert all the elements.
llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
@@ -822,6 +827,12 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ RegionStack.pop_back();
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
+ RegionMap.find(Ty->getDecl());
+ if (RI != RegionMap.end())
+ RegionMap.erase(RI);
+
llvm::DIDescriptor RDContext =
getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
llvm::DICompositeType RealDecl =
@@ -834,7 +845,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
-
+ RegionMap[RD] = llvm::WeakVH(RealDecl.getNode());
return RealDecl;
}
@@ -874,6 +885,9 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// Otherwise, insert it into the TypeCache so that recursive uses will find
// it.
TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
+ // Push the struct on region stack.
+ RegionStack.push_back(FwdDecl.getNode());
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl.getNode());
// Convert all the elements.
llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
@@ -946,6 +960,12 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DIArray Elements =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ RegionStack.pop_back();
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
+ RegionMap.find(Ty->getDecl());
+ if (RI != RegionMap.end())
+ RegionMap.erase(RI);
+
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
@@ -958,6 +978,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+ RegionMap[ID] = llvm::WeakVH(RealDecl.getNode());
return RealDecl;
}
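
The RegionStack/RegionMap bookkeeping added here keeps the forward declaration registered while member types are converted, so self-referential records and Objective-C interfaces do not recurse indefinitely; the temporary entry is popped and erased before the completed descriptor replaces the forward declaration via replaceAllUsesWith. A small illustrative C++ example of a type that needs the forward declaration to be visible mid-conversion:

    // While converting the 'next' member, debug info for Node must already be
    // findable; the forward declaration pushed above serves that purpose and
    // is later replaced with the completed composite type.
    struct Node {
      int value;
      Node *next;
    };
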
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 793a220..1dc083f 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -211,6 +211,8 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
if (D.getInit())
GV = AddInitializerToGlobalBlockVarDecl(D, GV);
+ GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+
// FIXME: Merge attribute handling.
if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
SourceManager &SM = CGM.getContext().getSourceManager();
@@ -471,68 +473,6 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
EnsureInsertPoint();
}
- if (Init) {
- llvm::Value *Loc = DeclPtr;
- if (isByRef)
- Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
- D.getNameAsString());
-
- bool isVolatile =
- getContext().getCanonicalType(D.getType()).isVolatileQualified();
-
- // If the initializer was a simple constant initializer, we can optimize it
- // in various ways.
- if (IsSimpleConstantInitializer) {
- llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
- assert(Init != 0 && "Wasn't a simple constant init?");
-
- llvm::Value *AlignVal =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- Align.getQuantity());
- const llvm::Type *IntPtr =
- llvm::IntegerType::get(VMContext, LLVMPointerWidth);
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(IntPtr,
- getContext().getTypeSizeInChars(Ty).getQuantity());
-
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
- if (Loc->getType() != BP)
- Loc = Builder.CreateBitCast(Loc, BP, "tmp");
-
- // If the initializer is all zeros, codegen with memset.
- if (isa<llvm::ConstantAggregateZero>(Init)) {
- llvm::Value *Zero =
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
- Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
- } else {
- // Otherwise, create a temporary global with the initializer then
- // memcpy from the global to the alloca.
- std::string Name = GetStaticDeclName(*this, D, ".");
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
- llvm::GlobalValue::InternalLinkage,
- Init, Name, 0, false, 0);
- GV->setAlignment(Align.getQuantity());
-
- llvm::Value *SrcPtr = GV;
- if (SrcPtr->getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
-
- Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
- }
- } else if (Ty->isReferenceType()) {
- RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
- EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
- } else if (!hasAggregateLLVMType(Init->getType())) {
- llvm::Value *V = EmitScalarExpr(Init);
- EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
- } else if (Init->getType()->isAnyComplexType()) {
- EmitComplexExprIntoAddr(Init, Loc, isVolatile);
- } else {
- EmitAggExpr(Init, Loc, isVolatile);
- }
- }
-
if (isByRef) {
const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);
@@ -591,6 +531,68 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
}
+ if (Init) {
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+
+ bool isVolatile =
+ getContext().getCanonicalType(D.getType()).isVolatileQualified();
+
+ // If the initializer was a simple constant initializer, we can optimize it
+ // in various ways.
+ if (IsSimpleConstantInitializer) {
+ llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
+ assert(Init != 0 && "Wasn't a simple constant init?");
+
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ Align.getQuantity());
+ const llvm::Type *IntPtr =
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth);
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(IntPtr,
+ getContext().getTypeSizeInChars(Ty).getQuantity());
+
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ if (Loc->getType() != BP)
+ Loc = Builder.CreateBitCast(Loc, BP, "tmp");
+
+ // If the initializer is all zeros, codegen with memset.
+ if (isa<llvm::ConstantAggregateZero>(Init)) {
+ llvm::Value *Zero =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
+ Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
+ } else {
+ // Otherwise, create a temporary global with the initializer then
+ // memcpy from the global to the alloca.
+ std::string Name = GetStaticDeclName(*this, D, ".");
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name, 0, false, 0);
+ GV->setAlignment(Align.getQuantity());
+
+ llvm::Value *SrcPtr = GV;
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+
+ Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
+ }
+ } else if (Ty->isReferenceType()) {
+ RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
+ EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
+ } else if (!hasAggregateLLVMType(Init->getType())) {
+ llvm::Value *V = EmitScalarExpr(Init);
+ EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
+ } else if (Init->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, Loc, isVolatile);
+ } else {
+ EmitAggExpr(Init, Loc, isVolatile);
+ }
+ }
+
// Handle CXX destruction of variables.
QualType DtorTy(Ty);
while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
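
The initializer-emission block is moved so it runs after the __block (byref) header setup. Within it, the constant-initializer fast path is unchanged: an all-zero initializer is emitted as a memset, and any other simple constant becomes a private constant global that is memcpy'd into the alloca. A hedged sketch (arbitrary names) of locals expected to hit each path:

    void example() {
      int zeros[32] = {};            // all-zero constant: lowered to a memset
      int table[8]  = {1, 2, 3, 4};  // other simple constant: lowered to a
                                     // private global plus a memcpy into the
                                     // stack slot
    }
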
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 198e2d1..2436357 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -1147,8 +1147,8 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
// Collect the names of referenced protocols
llvm::SmallVector<std::string, 16> Protocols;
- const ObjCInterfaceDecl *ClassDecl = OCD->getClassInterface();
- const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
+ const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl();
+ const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
E = Protos.end(); I != E; ++I)
Protocols.push_back((*I)->getNameAsString());
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 5236d20..4907223 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -760,7 +760,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
// subobject. For a virtual base, this is the offset in the virtual table of
// the virtual base offset for the virtual base referenced (negative).
if (Base->isVirtual())
- OffsetFlags = CGM.getVtableInfo().getVirtualBaseOffsetIndex(RD, BaseDecl);
+ OffsetFlags = CGM.getVtableInfo().getVirtualBaseOffsetOffset(RD, BaseDecl);
else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
OffsetFlags = Layout.getBaseClassOffset(BaseDecl) / 8;
diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp
index 4500ec0..9bcf986 100644
--- a/lib/CodeGen/CGVtable.cpp
+++ b/lib/CodeGen/CGVtable.cpp
@@ -63,10 +63,7 @@ public:
/// Offset - the base offset of the overrider in the layout class.
uint64_t Offset;
- /// OldOffset - FIXME: Remove this.
- int64_t OldOffset;
-
- OverriderInfo() : Method(0), Offset(0), OldOffset(0) { }
+ OverriderInfo() : Method(0), Offset(0) { }
};
private:
@@ -251,7 +248,6 @@ void FinalOverriders::AddOverriders(BaseSubobject Base,
OverriderInfo& Overrider = OverridersMap[std::make_pair(Base, MD)];
assert(!Overrider.Method && "Overrider should not exist yet!");
- Overrider.OldOffset = Base.getBaseOffset();
Overrider.Offset = OffsetInLayoutClass;
Overrider.Method = MD;
}
@@ -415,7 +411,6 @@ void FinalOverriders::PropagateOverrider(const CXXMethodDecl *OldMD,
// Set the new overrider.
Overrider.Offset = OverriderOffsetInLayoutClass;
- Overrider.OldOffset = NewBase.getBaseOffset();
Overrider.Method = NewMD;
// And propagate it further.
@@ -559,7 +554,7 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
Out << " " << MD->getQualifiedNameAsString() << " - (";
Out << Overrider.Method->getQualifiedNameAsString();
- Out << ", " << Overrider.OldOffset / 8 << ", " << Overrider.Offset / 8 << ')';
+ Out << ", " << Overrider.Offset / 8 << ')';
AdjustmentOffsetsMapTy::const_iterator AI =
ReturnAdjustments.find(std::make_pair(Base, MD));
@@ -834,6 +829,11 @@ int64_t VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
class VCallAndVBaseOffsetBuilder {
+public:
+ typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ VBaseOffsetOffsetsMapTy;
+
+private:
/// MostDerivedClass - The most derived class for which we're building vcall
/// and vbase offsets.
const CXXRecordDecl *MostDerivedClass;
@@ -856,6 +856,11 @@ class VCallAndVBaseOffsetBuilder {
/// VCallOffsets - Keeps track of vcall offsets.
VCallOffsetMap VCallOffsets;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
+ /// relative to the address point.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
/// FinalOverriders - The final overriders of the most derived class.
/// (Can be null when we're not building a vtable of the most derived class).
const FinalOverriders *Overriders;
@@ -871,6 +876,10 @@ class VCallAndVBaseOffsetBuilder {
/// AddVBaseOffsets - Add vbase offsets for the given class.
void AddVBaseOffsets(const CXXRecordDecl *Base, uint64_t OffsetInLayoutClass);
+ /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
+ /// bytes, relative to the vtable address point.
+ int64_t getCurrentOffsetOffset() const;
+
public:
VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
const CXXRecordDecl *LayoutClass,
@@ -889,7 +898,10 @@ public:
const_iterator components_begin() const { return Components.rbegin(); }
const_iterator components_end() const { return Components.rend(); }
- const VCallOffsetMap& getVCallOffsets() const { return VCallOffsets; }
+ const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
};
void
@@ -940,6 +952,20 @@ VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
AddVCallOffsets(Base, RealBaseOffset);
}
+int64_t VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
+ // OffsetIndex is the index of this vcall or vbase offset, relative to the
+ // vtable address point. (We subtract 3 to account for the information just
+ // above the address point: the RTTI info, the offset to top, and the
+ // offset itself).
+ int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+
+ // FIXME: We shouldn't use / 8 here.
+ int64_t OffsetOffset = OffsetIndex *
+ (int64_t)Context.Target.getPointerWidth(0) / 8;
+
+ return OffsetOffset;
+}
+
void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
uint64_t VBaseOffset) {
const CXXRecordDecl *RD = Base.getBase();
@@ -980,15 +1006,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
if (!MD->isVirtual())
continue;
- // OffsetIndex is the index of this vcall offset, relative to the vtable
- // address point. (We subtract 3 to account for the information just
- // above the address point, the RTTI info, the offset to top, and the
- // vcall offset itself).
- int64_t OffsetIndex = -(int64_t)(3 + Components.size());
-
- // FIXME: We shouldn't use / 8 here.
- int64_t OffsetOffset = OffsetIndex *
- (int64_t)Context.Target.getPointerWidth(0) / 8;
+ int64_t OffsetOffset = getCurrentOffsetOffset();
// Don't add a vcall offset if we already have one for this member function
// signature.
@@ -1048,10 +1066,17 @@ void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
int64_t Offset =
(int64_t)(LayoutClassLayout.getVBaseClassOffset(BaseDecl) -
OffsetInLayoutClass) / 8;
-
+
+ // Add the vbase offset offset.
+ assert(!VBaseOffsetOffsets.count(BaseDecl) &&
+ "vbase offset offset already exists!");
+
+ int64_t VBaseOffsetOffset = getCurrentOffsetOffset();
+ VBaseOffsetOffsets.insert(std::make_pair(BaseDecl, VBaseOffsetOffset));
+
Components.push_back(VtableComponent::MakeVBaseOffset(Offset));
}
-
+
// Check the base class looking for more vbase offsets.
AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
}
@@ -1096,6 +1121,13 @@ private:
/// bases in this vtable.
llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
+ typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ VBaseOffsetOffsetsMapTy;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
+ /// the most derived class.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
/// Components - The components of the vtable being built.
llvm::SmallVector<VtableComponent, 64> Components;
@@ -1117,24 +1149,27 @@ private:
bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
};
- /// ReturnAdjustments - The return adjustments needed in this vtable.
- llvm::SmallVector<std::pair<uint64_t, ReturnAdjustment>, 16>
- ReturnAdjustments;
-
/// MethodInfo - Contains information about a method in a vtable.
/// (Used for computing 'this' pointer adjustment thunks.)
struct MethodInfo {
/// BaseOffset - The base offset of this method.
const uint64_t BaseOffset;
+ /// BaseOffsetInLayoutClass - The base offset in the layout class of this
+ /// method.
+ const uint64_t BaseOffsetInLayoutClass;
+
/// VtableIndex - The index in the vtable that this method has.
/// (For destructors, this is the index of the complete destructor).
const uint64_t VtableIndex;
- MethodInfo(uint64_t BaseOffset, uint64_t VtableIndex)
- : BaseOffset(BaseOffset), VtableIndex(VtableIndex) { }
+ MethodInfo(uint64_t BaseOffset, uint64_t BaseOffsetInLayoutClass,
+ uint64_t VtableIndex)
+ : BaseOffset(BaseOffset),
+ BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
+ VtableIndex(VtableIndex) { }
- MethodInfo() : BaseOffset(0), VtableIndex(0) { }
+ MethodInfo() : BaseOffset(0), BaseOffsetInLayoutClass(0), VtableIndex(0) { }
};
typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
@@ -1158,9 +1193,27 @@ private:
bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
};
- /// ThisAdjustments - The 'this' pointer adjustments needed in this vtable.
- llvm::SmallVector<std::pair<uint64_t, ThisAdjustment>, 16>
- ThisAdjustments;
+ /// ThunkInfo - The 'this' pointer adjustment as well as an optional return
+ /// adjustment for a thunk.
+ struct ThunkInfo {
+ /// This - The 'this' pointer adjustment.
+ ThisAdjustment This;
+
+ /// Return - The return adjustment.
+ ReturnAdjustment Return;
+
+ ThunkInfo() { }
+
+ ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
+ : This(This), Return(Return) { }
+
+ bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
+ };
+
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> ThunksInfoMapTy;
+
+ /// Thunks - The thunks by vtable index in the vtable currently being built.
+ ThunksInfoMapTy Thunks;
/// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
/// part of the vtable we're currently building.
@@ -1182,11 +1235,13 @@ private:
BaseSubobject Derived) const;
/// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
- /// given virtual member function and the 'this' pointer adjustment base
- /// offset.
- ThisAdjustment ComputeThisAdjustment(const CXXMethodDecl *MD,
- BaseOffset Offset);
-
+ /// given virtual member function, its offset in the layout class and its
+ /// final overrider.
+ ThisAdjustment
+ ComputeThisAdjustment(const CXXMethodDecl *MD,
+ uint64_t BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider);
+
/// AddMethod - Add a single virtual member function to the vtable
/// components vector.
void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);
@@ -1235,7 +1290,11 @@ private:
/// LayoutSecondaryVtables - Layout the secondary vtables for the given base
/// subobject.
- void LayoutSecondaryVtables(BaseSubobject Base, uint64_t OffsetInLayoutClass);
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ void LayoutSecondaryVtables(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ uint64_t OffsetInLayoutClass);
/// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
/// class hierarchy.
@@ -1292,8 +1351,6 @@ OverridesMethodInBases(const CXXMethodDecl *MD,
}
void VtableBuilder::ComputeThisAdjustments() {
- std::map<uint64_t, ThisAdjustment> SortedThisAdjustments;
-
// Now go through the method info map and see if any of the methods need
// 'this' pointer adjustments.
for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
@@ -1301,56 +1358,34 @@ void VtableBuilder::ComputeThisAdjustments() {
const CXXMethodDecl *MD = I->first;
const MethodInfo &MethodInfo = I->second;
- BaseSubobject OverriddenBaseSubobject(MD->getParent(),
- MethodInfo.BaseOffset);
-
- // Get the final overrider for this method.
- FinalOverriders::OverriderInfo Overrider =
- Overriders.getOverrider(OverriddenBaseSubobject, MD);
-
- // Check if we need an adjustment.
- if (Overrider.OldOffset == (int64_t)MethodInfo.BaseOffset)
- continue;
-
- uint64_t VtableIndex = MethodInfo.VtableIndex;
-
- // Ignore adjustments for pure virtual member functions.
- if (Overrider.Method->isPure())
- continue;
-
// Ignore adjustments for unused function pointers.
+ uint64_t VtableIndex = MethodInfo.VtableIndex;
if (Components[VtableIndex].getKind() ==
VtableComponent::CK_UnusedFunctionPointer)
continue;
+
+ // Get the final overrider for this method.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(BaseSubobject(MD->getParent(),
+ MethodInfo.BaseOffset), MD);
+
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
- BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
- Overrider.OldOffset);
-
- // Compute the adjustment offset.
- BaseOffset ThisAdjustmentOffset =
- ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
- OverriderBaseSubobject);
-
- // Then compute the adjustment itself.
- ThisAdjustment ThisAdjustment = ComputeThisAdjustment(Overrider.Method,
- ThisAdjustmentOffset);
+ if (ThisAdjustment.isEmpty())
+ continue;
// Add it.
- SortedThisAdjustments.insert(std::make_pair(VtableIndex, ThisAdjustment));
-
+ Thunks[VtableIndex].This = ThisAdjustment;
+
if (isa<CXXDestructorDecl>(MD)) {
// Add an adjustment for the deleting destructor as well.
- SortedThisAdjustments.insert(std::make_pair(VtableIndex + 1,
- ThisAdjustment));
+ Thunks[VtableIndex + 1].This = ThisAdjustment;
}
}
/// Clear the method info map.
MethodInfoMap.clear();
-
- // Add the sorted elements.
- ThisAdjustments.append(SortedThisAdjustments.begin(),
- SortedThisAdjustments.end());
}
VtableBuilder::ReturnAdjustment
@@ -1360,13 +1395,20 @@ VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
if (!Offset.isEmpty()) {
if (Offset.VirtualBase) {
// Get the virtual base offset offset.
- Adjustment.VBaseOffsetOffset =
- VtableInfo.getVirtualBaseOffsetIndex(Offset.DerivedClass,
- Offset.VirtualBase);
- // FIXME: Once the assert in getVirtualBaseOffsetIndex is back again,
+ if (Offset.DerivedClass == MostDerivedClass) {
+ // We can get the offset offset directly from our map.
+ Adjustment.VBaseOffsetOffset =
+ VBaseOffsetOffsets.lookup(Offset.VirtualBase);
+ } else {
+ Adjustment.VBaseOffsetOffset =
+ VtableInfo.getVirtualBaseOffsetOffset(Offset.DerivedClass,
+ Offset.VirtualBase);
+ }
+
+ // FIXME: Once the assert in getVirtualBaseOffsetOffset is back again,
// we can get rid of this assert.
assert(Adjustment.VBaseOffsetOffset != 0 &&
- "Invalid base offset offset!");
+ "Invalid vbase offset offset!");
}
Adjustment.NonVirtual = Offset.NonVirtualOffset;
@@ -1402,13 +1444,13 @@ VtableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
if (Offset.VirtualBase) {
// If we have a virtual base class, the non-virtual offset is relative
// to the virtual base class offset.
- const ASTRecordLayout &MostDerivedClassLayout =
- Context.getASTRecordLayout(MostDerivedClass);
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
/// Get the virtual base offset, relative to the most derived class
/// layout.
OffsetToBaseSubobject +=
- MostDerivedClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
} else {
// Otherwise, the non-virtual offset is relative to the derived class
// offset.
@@ -1427,38 +1469,57 @@ VtableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
return BaseOffset();
}
+VtableBuilder::ThisAdjustment
+VtableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
+ uint64_t BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider) {
+ // Check if we need an adjustment at all.
+ if (BaseOffsetInLayoutClass == Overrider.Offset)
+ return ThisAdjustment();
+
+ // Ignore adjustments for pure virtual member functions.
+ if (Overrider.Method->isPure())
+ return ThisAdjustment();
+
+ BaseSubobject OverriddenBaseSubobject(MD->getParent(),
+ BaseOffsetInLayoutClass);
+
+ BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
+ Overrider.Offset);
+
+ // Compute the adjustment offset.
+ BaseOffset Offset = ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
+ OverriderBaseSubobject);
+ if (Offset.isEmpty())
+ return ThisAdjustment();
-VtableBuilder::ThisAdjustment
-VtableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
- BaseOffset Offset) {
ThisAdjustment Adjustment;
- if (!Offset.isEmpty()) {
- if (Offset.VirtualBase) {
- // Get the vcall offset map for this virtual base.
- VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
-
- if (VCallOffsets.empty()) {
- // We don't have vcall offsets for this virtual base, go ahead and
- // build them.
- VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
- /*FinalOverriders=*/0,
- BaseSubobject(Offset.VirtualBase, 0),
- /*BaseIsVirtual=*/true,
- /*OffsetInLayoutClass=*/0);
+ if (Offset.VirtualBase) {
+ // Get the vcall offset map for this virtual base.
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
+
+ if (VCallOffsets.empty()) {
+ // We don't have vcall offsets for this virtual base, go ahead and
+ // build them.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
+ /*FinalOverriders=*/0,
+ BaseSubobject(Offset.VirtualBase, 0),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/0);
- VCallOffsets = Builder.getVCallOffsets();
- }
-
- Adjustment.VCallOffsetOffset = VCallOffsets.getVCallOffsetOffset(MD);
+ VCallOffsets = Builder.getVCallOffsets();
}
-
- Adjustment.NonVirtual = Offset.NonVirtualOffset;
+
+ Adjustment.VCallOffsetOffset = VCallOffsets.getVCallOffsetOffset(MD);
}
+
+ // Set the non-virtual part of the adjustment.
+ Adjustment.NonVirtual = Offset.NonVirtualOffset;
return Adjustment;
}
-
+
void
VtableBuilder::AddMethod(const CXXMethodDecl *MD,
ReturnAdjustment ReturnAdjustment) {
@@ -1472,8 +1533,7 @@ VtableBuilder::AddMethod(const CXXMethodDecl *MD,
} else {
// Add the return adjustment if necessary.
if (!ReturnAdjustment.isEmpty())
- ReturnAdjustments.push_back(std::make_pair(Components.size(),
- ReturnAdjustment));
+ Thunks[Components.size()].Return = ReturnAdjustment;
// Add the function.
Components.push_back(VtableComponent::MakeFunction(MD));
@@ -1665,6 +1725,7 @@ VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
MethodInfo MethodInfo(Base.getBaseOffset(),
+ BaseOffsetInLayoutClass,
OverriddenMethodInfo.VtableIndex);
assert(!MethodInfoMap.count(MD) &&
@@ -1677,7 +1738,8 @@ VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
}
// Insert the method info for this method.
- MethodInfo MethodInfo(Base.getBaseOffset(), Components.size());
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ Components.size());
assert(!MethodInfoMap.count(MD) &&
"Should not have method info for this method yet!");
@@ -1737,6 +1799,11 @@ VtableBuilder::LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
VCallOffsets = Builder.getVCallOffsets();
}
+ // If we're laying out the most derived class we want to keep track of the
+ // virtual base class offset offsets.
+ if (Base.getBase() == MostDerivedClass)
+ VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
+
// Add the offset to top.
// FIXME: We should not use / 8 here.
int64_t OffsetToTop = -(int64_t)(OffsetInLayoutClass -
@@ -1772,11 +1839,16 @@ VtableBuilder::LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
AddressPoints.insert(std::make_pair(PrimaryBase, AddressPoint));
}
+ bool BaseIsMorallyVirtual = BaseIsVirtual;
+ if (isBuildingConstructorVtable() && Base.getBase() == MostDerivedClass)
+ BaseIsMorallyVirtual = false;
+
// Layout secondary vtables.
- LayoutSecondaryVtables(Base, OffsetInLayoutClass);
+ LayoutSecondaryVtables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
}
void VtableBuilder::LayoutSecondaryVtables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
uint64_t OffsetInLayoutClass) {
// Itanium C++ ABI 2.5.2:
// Following the primary virtual table of a derived class are secondary
@@ -1800,6 +1872,16 @@ void VtableBuilder::LayoutSecondaryVtables(BaseSubobject Base,
if (!BaseDecl->isDynamicClass())
continue;
+ if (isBuildingConstructorVtable()) {
+ // Itanium C++ ABI 2.6.4:
+ // Some of the base class subobjects may not need construction virtual
+ // tables, which will therefore not be present in the construction
+ // virtual table group, even though the subobject virtual tables are
+ // present in the main virtual table group for the complete object.
+ if (!BaseIsMorallyVirtual && !BaseDecl->getNumVBases())
+ continue;
+ }
+
// Get the base offset of this base.
uint64_t RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
uint64_t BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
@@ -1810,7 +1892,7 @@ void VtableBuilder::LayoutSecondaryVtables(BaseSubobject Base,
// to emit secondary vtables for other bases of this base.
if (BaseDecl == PrimaryBase) {
LayoutSecondaryVtables(BaseSubobject(BaseDecl, BaseOffset),
- BaseOffsetInLayoutClass);
+ BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
continue;
}
@@ -1920,15 +2002,15 @@ VtableBuilder::LayoutVtablesForVirtualBases(const CXXRecordDecl *RD,
/// dumpLayout - Dump the vtable layout.
void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
- if (MostDerivedClass == LayoutClass) {
- Out << "Vtable for '";
- Out << MostDerivedClass->getQualifiedNameAsString();
- } else {
+ if (isBuildingConstructorVtable()) {
Out << "Construction vtable for ('";
Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
// FIXME: Don't use / 8 .
Out << MostDerivedClassOffset / 8 << ") in '";
Out << LayoutClass->getQualifiedNameAsString();
+ } else {
+ Out << "Vtable for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
}
Out << "' (" << Components.size() << " entries).\n";
@@ -1945,8 +2027,6 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
AddressPointsByIndex.insert(std::make_pair(Index, Base));
}
- unsigned NextReturnAdjustmentIndex = 0;
- unsigned NextThisAdjustmentIndex = 0;
for (unsigned I = 0, E = Components.size(); I != E; ++I) {
uint64_t Index = I;
@@ -1983,38 +2063,33 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
if (MD->isPure())
Out << " [pure]";
- // If this function pointer has a return adjustment, dump it.
- if (NextReturnAdjustmentIndex < ReturnAdjustments.size() &&
- ReturnAdjustments[NextReturnAdjustmentIndex].first == I) {
- const ReturnAdjustment Adjustment =
- ReturnAdjustments[NextReturnAdjustmentIndex].second;
-
- Out << "\n [return adjustment: ";
- Out << Adjustment.NonVirtual << " non-virtual";
-
- if (Adjustment.VBaseOffsetOffset)
- Out << ", " << Adjustment.VBaseOffsetOffset << " vbase offset offset";
-
- Out << ']';
+ ThunkInfo Thunk = Thunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this function pointer has a return adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "\n [return adjustment: ";
+ Out << Thunk.Return.NonVirtual << " non-virtual";
+
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
- NextReturnAdjustmentIndex++;
- }
-
- // If this function pointer has a 'this' pointer adjustment, dump it.
- if (NextThisAdjustmentIndex < ThisAdjustments.size() &&
- ThisAdjustments[NextThisAdjustmentIndex].first == I) {
- const ThisAdjustment Adjustment =
- ThisAdjustments[NextThisAdjustmentIndex].second;
-
- Out << "\n [this adjustment: ";
- Out << Adjustment.NonVirtual << " non-virtual";
+ Out << ']';
+ }
- if (Adjustment.VCallOffsetOffset)
- Out << ", " << Adjustment.VCallOffsetOffset << " vcall offset offset";
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
- Out << ']';
-
- NextThisAdjustmentIndex++;
+ Out << ']';
+ }
}
break;
@@ -2036,23 +2111,21 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
if (DD->isPure())
Out << " [pure]";
- // If this destructor has a 'this' pointer adjustment, dump it.
- if (NextThisAdjustmentIndex < ThisAdjustments.size() &&
- ThisAdjustments[NextThisAdjustmentIndex].first == I) {
- const ThisAdjustment Adjustment =
- ThisAdjustments[NextThisAdjustmentIndex].second;
-
- Out << "\n [this adjustment: ";
- Out << Adjustment.NonVirtual << " non-virtual";
-
- if (Adjustment.VCallOffsetOffset)
- Out << ", " << Adjustment.VCallOffsetOffset << " vcall offset offset";
-
- Out << ']';
-
- NextThisAdjustmentIndex++;
- }
-
+ ThunkInfo Thunk = Thunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this destructor has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
break;
}
@@ -2108,6 +2181,29 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
}
Out << '\n';
+
+ if (!isBuildingConstructorVtable() && MostDerivedClass->getNumVBases()) {
+ Out << "Virtual base offset offsets for '";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "'.\n";
+
+ // We store the virtual base class names and their offsets in a map to get
+ // a stable order.
+ std::map<std::string, int64_t> ClassNamesAndOffsets;
+
+ for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
+ E = VBaseOffsetOffsets.end(); I != E; ++I) {
+ std::string ClassName = I->first->getQualifiedNameAsString();
+ int64_t OffsetOffset = I->second;
+ ClassNamesAndOffsets.insert(std::make_pair(ClassName, OffsetOffset));
+ }
+
+ for (std::map<std::string, int64_t>::const_iterator I =
+ ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
+ I != E; ++I)
+ Out << " " << I->first << " | " << I->second << '\n';
+
+ Out << "\n";
+ }
}
}
@@ -2598,7 +2694,7 @@ public:
CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
if (D != MostDerivedClass)
- return CGM.getVtableInfo().getVirtualBaseOffsetIndex(D, B);
+ return CGM.getVtableInfo().getVirtualBaseOffsetOffset(D, B);
llvm::DenseMap<const CXXRecordDecl *, Index_t>::iterator i;
i = VBIndex.find(B);
if (i != VBIndex.end())
@@ -3346,39 +3442,39 @@ CGVtableInfo::getAdjustments(GlobalDecl GD) {
return 0;
}
-int64_t CGVtableInfo::getVirtualBaseOffsetIndex(const CXXRecordDecl *RD,
- const CXXRecordDecl *VBase) {
+int64_t CGVtableInfo::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase) {
ClassPairTy ClassPair(RD, VBase);
- VirtualBaseClassIndiciesTy::iterator I =
- VirtualBaseClassIndicies.find(ClassPair);
- if (I != VirtualBaseClassIndicies.end())
+ VirtualBaseClassOffsetOffsetsMapTy::iterator I =
+ VirtualBaseClassOffsetOffsets.find(ClassPair);
+ if (I != VirtualBaseClassOffsetOffsets.end())
return I->second;
- // FIXME: This seems expensive. Can we do a partial job to get
- // just this data.
- AddressPointsMapTy AddressPoints;
- OldVtableBuilder b(RD, RD, 0, CGM, false, AddressPoints);
- D1(printf("vtable %s\n", RD->getNameAsCString()));
- b.GenerateVtableForBase(RD);
- b.GenerateVtableForVBases(RD);
+ VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/0,
+ BaseSubobject(RD, 0),
+ /*BaseIsVirtual=*/false,
+ /*OffsetInLayoutClass=*/0);
- for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
- b.getVBIndex().begin(), E = b.getVBIndex().end(); I != E; ++I) {
+
+ for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
// Insert all types.
ClassPairTy ClassPair(RD, I->first);
- VirtualBaseClassIndicies.insert(std::make_pair(ClassPair, I->second));
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
}
- I = VirtualBaseClassIndicies.find(ClassPair);
+ I = VirtualBaseClassOffsetOffsets.find(ClassPair);
+
// FIXME: The assertion below currently fails with the old vtable
// layout code if there is a non-virtual thunk adjustment in a vtable.
// Once the new layout is in place, this return should be removed.
- if (I == VirtualBaseClassIndicies.end())
+ if (I == VirtualBaseClassOffsetOffsets.end())
return 0;
- assert(I != VirtualBaseClassIndicies.end() && "Did not find index!");
+ assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!");
return I->second;
}
@@ -3400,20 +3496,17 @@ CGVtableInfo::GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
if (GenerateDefinition) {
if (LayoutClass == RD) {
assert(!IsVirtual &&
- "Can't only have a virtual base in construction vtables!");
- VtableBuilder Builder(*this, RD, Offset,
- /*MostDerivedClassIsVirtual=*/false,
- LayoutClass);
-
- if (CGM.getLangOptions().DumpVtableLayouts)
- Builder.dumpLayout(llvm::errs());
- } else if (CGM.getLangOptions().DumpVtableLayouts) {
- // We only build construction vtables when dumping vtable layouts for now.
- VtableBuilder Builder(*this, RD, Offset,
- /*MostDerivedClassIsVirtual=*/IsVirtual,
- LayoutClass);
- Builder.dumpLayout(llvm::errs());
+ "Can only have a virtual base in construction vtables!");
+ assert(!Offset &&
+ "Can only have a base offset in construction vtables!");
}
+
+ VtableBuilder Builder(*this, RD, Offset,
+ /*MostDerivedClassIsVirtual=*/IsVirtual,
+ LayoutClass);
+
+ if (CGM.getLangOptions().DumpVtableLayouts)
+ Builder.dumpLayout(llvm::errs());
}
llvm::SmallString<256> OutName;
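
The arithmetic factored into getCurrentOffsetOffset is the core of the new vbase-offset-offset tracking: the next vcall or vbase offset slot sits at index -(3 + Components.size()) relative to the address point, scaled by the pointer size. A standalone sketch of that computation, assuming an 8-byte pointer target (function and parameter names are made up for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Mirrors the builder's formula: slot index -(3 + components so far),
    // converted to a byte offset relative to the vtable address point.
    int64_t currentOffsetOffset(std::size_t numComponents,
                                unsigned pointerWidthBits = 64) {
      int64_t offsetIndex = -(int64_t)(3 + numComponents);
      return offsetIndex * (int64_t)pointerWidthBits / 8;
    }

    int main() {
      std::printf("%lld\n", (long long)currentOffsetOffset(0)); // prints -24
      std::printf("%lld\n", (long long)currentOffsetOffset(1)); // prints -32
    }
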
diff --git a/lib/CodeGen/CGVtable.h b/lib/CodeGen/CGVtable.h
index 57220d9..5a146ab 100644
--- a/lib/CodeGen/CGVtable.h
+++ b/lib/CodeGen/CGVtable.h
@@ -149,10 +149,12 @@ private:
typedef std::pair<const CXXRecordDecl *,
const CXXRecordDecl *> ClassPairTy;
- /// VirtualBaseClassIndicies - Contains the index into the vtable where the
- /// offsets for virtual bases of a class are stored.
- typedef llvm::DenseMap<ClassPairTy, int64_t> VirtualBaseClassIndiciesTy;
- VirtualBaseClassIndiciesTy VirtualBaseClassIndicies;
+ /// VirtualBaseClassOffsetOffsets - Contains the vtable offset (relative to
+ /// the address point) in bytes where the offsets for virtual bases of a class
+ /// are stored.
+ typedef llvm::DenseMap<ClassPairTy, int64_t>
+ VirtualBaseClassOffsetOffsetsMapTy;
+ VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets;
/// Vtables - All the vtables which have been defined.
llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> Vtables;
@@ -202,13 +204,13 @@ public:
/// stored.
uint64_t getMethodVtableIndex(GlobalDecl GD);
- /// getVirtualBaseOffsetIndex - Return the index (relative to the vtable
- /// address point) where the offset of the virtual base that contains the
- /// given Base is stored, otherwise, if no virtual base contains the given
+ /// getVirtualBaseOffsetOffset - Return the offset in bytes (relative to the
+ /// vtable address point) where the offset of the virtual base that contains
+ /// the given base is stored, otherwise, if no virtual base contains the given
/// class, return 0. Base must be a virtual base class or an unambiguous
/// base.
- int64_t getVirtualBaseOffsetIndex(const CXXRecordDecl *RD,
- const CXXRecordDecl *VBase);
+ int64_t getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase);
AdjustmentVectorTy *getAdjustments(GlobalDecl GD);
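
With this interface change, getVirtualBaseOffsetOffset answers "how many bytes above the address point is the offset of this virtual base stored" instead of returning a slot index. A hedged example under the 64-bit Itanium layout (class names are hypothetical):

    struct A { virtual void f(); };
    struct B : virtual A { virtual void g(); };
    // In B's primary vtable the three entries preceding the address point are
    // the vbase offset for A, the offset-to-top, and the RTTI pointer, in that
    // order.  So getVirtualBaseOffsetOffset(B, A) would be expected to return
    // -24, i.e. three 8-byte slots above the address point.
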
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index c67948d..f41db14 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -285,8 +285,7 @@ GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
break;
case TSK_ExplicitInstantiationDefinition:
- // FIXME: explicit instantiation definitions should use weak linkage
- return CodeGenModule::GVA_StrongExternal;
+ return CodeGenModule::GVA_ExplicitTemplateInstantiation;
case TSK_ExplicitInstantiationDeclaration:
case TSK_ImplicitInstantiation:
@@ -343,6 +342,12 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
// merged with other definitions. c) C++ has the ODR, so we know the
// definition is dependable.
return llvm::Function::LinkOnceODRLinkage;
+ } else if (Linkage == GVA_ExplicitTemplateInstantiation) {
+ // An explicit instantiation of a template has weak linkage, since
+ // explicit instantiations can occur in multiple translation units
+ // and must all be equivalent. However, we are not allowed to
+ // throw away these explicit instantiations.
+ return llvm::Function::WeakODRLinkage;
} else {
assert(Linkage == GVA_StrongExternal);
// Otherwise, we have strong external linkage.
@@ -589,6 +594,7 @@ bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
// static, static inline, always_inline, and extern inline functions can
// always be deferred. Normal inline functions can be deferred in C99/C++.
+ // Implicit template instantiations can also be deferred in C++.
if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
return true;
@@ -1043,15 +1049,15 @@ GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
switch (TSK) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
-
- // FIXME: ExplicitInstantiationDefinition should be weak!
- case TSK_ExplicitInstantiationDefinition:
return CodeGenModule::GVA_StrongExternal;
-
+
case TSK_ExplicitInstantiationDeclaration:
llvm_unreachable("Variable should not be instantiated");
// Fall through to treat this like any other instantiation.
+ case TSK_ExplicitInstantiationDefinition:
+ return CodeGenModule::GVA_ExplicitTemplateInstantiation;
+
case TSK_ImplicitInstantiation:
return CodeGenModule::GVA_TemplateInstantiation;
}
@@ -1171,7 +1177,10 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setLinkage(llvm::GlobalVariable::WeakODRLinkage);
else
GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
- } else if (Linkage == GVA_TemplateInstantiation)
+ } else if (Linkage == GVA_TemplateInstantiation ||
+ Linkage == GVA_ExplicitTemplateInstantiation)
+ // FIXME: It seems like we can provide more specific linkage here
+ // (LinkOnceODR, WeakODR).
GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
else if (!getLangOptions().CPlusPlus && !CodeGenOpts.NoCommon &&
!D->hasExternalStorage() && !D->getInit() &&
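
GVA_ExplicitTemplateInstantiation separates explicit instantiation definitions from implicit ones: functions now get weak_odr linkage, while variables still fall back to weak linkage pending the FIXME above. A short example (illustrative names) of code that takes this path:

    template <typename T> T twice(T x) { return x + x; }
    template <typename T> struct Holder { static T value; };
    template <typename T> T Holder<T>::value = T();

    // Explicit instantiation definitions may appear in several translation
    // units, so the emitted definitions must be kept but merged by the linker;
    // the function is expected to get weak_odr linkage, the static member
    // (for now) weak linkage.
    template int twice<int>(int);
    template struct Holder<int>;
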
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 40dc563..9077ade 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -437,7 +437,8 @@ public:
GVA_C99Inline,
GVA_CXXInline,
GVA_StrongExternal,
- GVA_TemplateInstantiation
+ GVA_TemplateInstantiation,
+ GVA_ExplicitTemplateInstantiation
};
llvm::GlobalVariable::LinkageTypes
diff --git a/lib/CodeGen/Makefile b/lib/CodeGen/Makefile
index 83cb367..3cea6bb 100644
--- a/lib/CodeGen/Makefile
+++ b/lib/CodeGen/Makefile
@@ -16,9 +16,9 @@ LEVEL = ../../../..
LIBRARYNAME := clangCodeGen
BUILD_ARCHIVE = 1
-CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
ifdef CLANG_VENDOR
-CPPFLAGS += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
+CPP.Flags += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
endif
include $(LEVEL)/Makefile.common
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index 2e0580f..32555ab 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -53,19 +53,19 @@ static const DeclContext *GetLocalClassFunctionDeclContext(
static const CXXMethodDecl *getStructor(const CXXMethodDecl *MD) {
assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
"Passed in decl is not a ctor or dtor!");
-
+
if (const TemplateDecl *TD = MD->getPrimaryTemplate()) {
MD = cast<CXXMethodDecl>(TD->getTemplatedDecl());
assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
"Templated decl is not a ctor or dtor!");
}
-
+
return MD;
}
static const unsigned UnknownArity = ~0U;
-
+
/// CXXNameMangler - Manage the mangling of a single name.
class CXXNameMangler {
MangleContext &Context;
@@ -73,7 +73,7 @@ class CXXNameMangler {
const CXXMethodDecl *Structor;
unsigned StructorType;
-
+
llvm::DenseMap<uintptr_t, unsigned> Substitutions;
ASTContext &getASTContext() const { return Context.getASTContext(); }
@@ -92,7 +92,7 @@ public:
~CXXNameMangler() {
if (Out.str()[0] == '\01')
return;
-
+
int status = 0;
char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
assert(status == 0 && "Could not demangle mangled name!");
@@ -151,7 +151,7 @@ private:
void mangleQualifiers(Qualifiers Quals);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
-
+
// Declare manglers for every type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT)
@@ -172,10 +172,12 @@ private:
void mangleCXXCtorType(CXXCtorType T);
void mangleCXXDtorType(CXXDtorType T);
- void mangleTemplateArgs(const TemplateArgument *TemplateArgs,
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
- void mangleTemplateArgs(const TemplateArgumentList &L);
- void mangleTemplateArg(const TemplateArgument &A);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL);
+ void mangleTemplateArg(const NamedDecl *P, const TemplateArgument &A);
void mangleTemplateParameter(unsigned Index);
};
@@ -248,8 +250,10 @@ void CXXNameMangler::mangle(const NamedDecl *D, llvm::StringRef Prefix) {
Out << Prefix;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleName(VD);
else
- mangleName(cast<VarDecl>(D));
+ mangleName(cast<FieldDecl>(D));
}
void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
@@ -306,7 +310,7 @@ static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
LinkageSpecDecl::lang_cxx && "Unexpected linkage decl!");
DC = DC->getParent();
}
-
+
return DC;
}
@@ -315,10 +319,10 @@ static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
static bool isStdNamespace(const DeclContext *DC) {
if (!DC->isNamespace())
return false;
-
+
if (!IgnoreLinkageSpecDecls(DC->getParent())->isTranslationUnit())
return false;
-
+
return isStd(cast<NamespaceDecl>(DC));
}
@@ -349,12 +353,12 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) {
// ::= <local-name>
//
const DeclContext *DC = ND->getDeclContext();
-
+
if (GetLocalClassFunctionDeclContext(DC)) {
mangleLocalName(ND);
return;
}
-
+
// If this is an extern variable declared locally, the relevant DeclContext
// is that of the containing namespace, or the translation unit.
if (isa<FunctionDecl>(DC) && ND->hasLinkage())
@@ -369,7 +373,8 @@ void CXXNameMangler::mangleName(const NamedDecl *ND) {
const TemplateArgumentList *TemplateArgs = 0;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
mangleUnscopedTemplateName(TD);
- mangleTemplateArgs(*TemplateArgs);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
return;
}
@@ -391,7 +396,8 @@ void CXXNameMangler::mangleName(const TemplateDecl *TD,
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
mangleUnscopedTemplateName(TD);
- mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
} else {
mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
}
@@ -417,7 +423,7 @@ void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
= dyn_cast<TemplateTemplateParmDecl>(ND)) {
mangleTemplateParameter(TTP->getIndex());
return;
- }
+ }
mangleUnscopedName(ND->getTemplatedDecl());
addSubstitution(ND);
@@ -429,7 +435,7 @@ void CXXNameMangler::mangleNumber(int64_t Number) {
Out << 'n';
Number = -Number;
}
-
+
Out << Number;
}
@@ -445,7 +451,7 @@ void CXXNameMangler::mangleCallOffset(const ThunkAdjustment &Adjustment) {
Out << '_';
return;
}
-
+
Out << 'v';
mangleNumber(Adjustment.NonVirtual);
Out << '_';
@@ -496,7 +502,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
case DeclarationName::Identifier: {
if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
// We must avoid conflicts between internally- and externally-
- // linked variable declaration names in the same TU.
+ // linked variable declaration names in the same TU.
// This naming convention is the same as that followed by GCC, though it
// shouldn't actually matter.
if (ND && isa<VarDecl>(ND) && ND->getLinkage() == InternalLinkage &&
@@ -582,7 +588,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
unsigned Arity;
if (ND) {
Arity = cast<FunctionDecl>(ND)->getNumParams();
-
+
// If we have a C++ member function, we need to include the 'this' pointer.
// FIXME: This does not make sense for operators that are static, but their
// names stay the same regardless of the arity (operator new for instance).
@@ -628,7 +634,8 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
const TemplateArgumentList *TemplateArgs = 0;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD);
- mangleTemplateArgs(*TemplateArgs);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
}
else {
manglePrefix(DC, NoFunction);
@@ -645,7 +652,8 @@ void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
Out << 'N';
mangleTemplatePrefix(TD);
- mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
Out << 'E';
}
@@ -656,26 +664,26 @@ void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
// <discriminator> := _ <non-negative number>
const DeclContext *DC = ND->getDeclContext();
Out << 'Z';
-
+
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC))
mangleObjCMethodName(MD);
else if (const DeclContext *CDC = GetLocalClassFunctionDeclContext(DC)) {
mangleFunctionEncoding(cast<FunctionDecl>(CDC));
Out << 'E';
mangleNestedName(ND, DC, true /*NoFunction*/);
-
+
// FIXME. This still does not cover all cases.
unsigned disc;
if (Context.getNextDiscriminator(ND, disc)) {
if (disc < 10)
Out << '_' << disc;
- else
+ else
Out << "__" << disc << '_';
}
return;
}
- else
+ else
mangleFunctionEncoding(cast<FunctionDecl>(DC));
Out << 'E';
@@ -702,7 +710,8 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
const TemplateArgumentList *TemplateArgs = 0;
if (const TemplateDecl *TD = isTemplate(cast<NamedDecl>(DC), TemplateArgs)) {
mangleTemplatePrefix(TD);
- mangleTemplateArgs(*TemplateArgs);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
}
else if(NoFunction && isa<FunctionDecl>(DC))
return;
@@ -729,7 +738,7 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
= dyn_cast<TemplateTemplateParmDecl>(ND)) {
mangleTemplateParameter(TTP->getIndex());
return;
- }
+ }
manglePrefix(ND->getDeclContext());
mangleUnqualifiedName(ND->getTemplatedDecl());
@@ -749,22 +758,22 @@ CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
case OO_Array_Delete: Out << "da"; break;
// ::= ps # + (unary)
// ::= pl # +
- case OO_Plus:
+ case OO_Plus:
assert((Arity == 1 || Arity == 2) && "Invalid arity!");
Out << (Arity == 1? "ps" : "pl"); break;
// ::= ng # - (unary)
// ::= mi # -
- case OO_Minus:
+ case OO_Minus:
assert((Arity == 1 || Arity == 2) && "Invalid arity!");
Out << (Arity == 1? "ng" : "mi"); break;
// ::= ad # & (unary)
// ::= an # &
- case OO_Amp:
+ case OO_Amp:
assert((Arity == 1 || Arity == 2) && "Invalid arity!");
Out << (Arity == 1? "ad" : "an"); break;
// ::= de # * (unary)
// ::= ml # *
- case OO_Star:
+ case OO_Star:
assert((Arity == 1 || Arity == 2) && "Invalid arity!");
Out << (Arity == 1? "de" : "ml"); break;
// ::= co # ~
@@ -863,15 +872,15 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
llvm::SmallString<64> Name;
llvm::raw_svector_ostream OS(Name);
-
- const ObjCContainerDecl *CD =
+
+ const ObjCContainerDecl *CD =
dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
assert (CD && "Missing container decl in GetNameForMethod");
OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
OS << '(' << CID->getNameAsString() << ')';
OS << ' ' << MD->getSelector().getAsString() << ']';
-
+
Out << OS.str().size() << OS.str();
}
@@ -1143,7 +1152,9 @@ void CXXNameMangler::mangleType(const TypenameType *T) {
TemplateDecl *TD = TST->getTemplateName().getAsTemplateDecl();
assert(TD && "FIXME: Support dependent template names");
mangleTemplatePrefix(TD);
- mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TST->getArgs(),
+ TST->getNumArgs());
addSubstitution(QualType(TST, 0));
}
} else if (const TemplateTypeParmType *TTPT =
@@ -1173,7 +1184,7 @@ void CXXNameMangler::mangleType(const TypeOfExprType *T) {
void CXXNameMangler::mangleType(const DecltypeType *T) {
Expr *E = T->getUnderlyingExpr();
-
+
// type ::= Dt <expression> E # decltype of an id-expression
// # or class member access
// ::= DT <expression> E # decltype of an expression
@@ -1195,11 +1206,11 @@ void CXXNameMangler::mangleType(const DecltypeType *T) {
Out << 'E';
}
-void CXXNameMangler::mangleIntegerLiteral(QualType T,
+void CXXNameMangler::mangleIntegerLiteral(QualType T,
const llvm::APSInt &Value) {
// <expr-primary> ::= L <type> <value number> E # integer literal
Out << 'L';
-
+
mangleType(T);
if (T->isBooleanType()) {
// Boolean values are encoded as 0/1.
@@ -1210,7 +1221,7 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T,
Value.abs().print(Out, false);
}
Out << 'E';
-
+
}
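
A minimal illustration of the <expr-primary> integer encoding above (the manglings shown are the expected Itanium spellings):

    template <int N>  struct X {};
    template <bool B> struct Y {};

    void f(X<-5>)   {}   // expected: _Z1f1XILin5EE  ('n' marks a negative value)
    void g(Y<true>) {}   // expected: _Z1g1YILb1EE   (booleans are encoded as 0/1)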
void CXXNameMangler::mangleCalledExpression(const Expr *E, unsigned Arity) {
@@ -1314,7 +1325,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
- case Expr::CXXUnresolvedConstructExprClass: {
+ case Expr::CXXUnresolvedConstructExprClass: {
const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
unsigned N = CE->arg_size();
@@ -1323,7 +1334,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
if (N != 1) Out << "_";
for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
if (N != 1) Out << "E";
- break;
+ break;
}
case Expr::CXXTemporaryObjectExprClass:
@@ -1355,18 +1366,18 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
case Expr::UnaryOperatorClass: {
const UnaryOperator *UO = cast<UnaryOperator>(E);
- mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
+ mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
/*Arity=*/1);
mangleExpression(UO->getSubExpr());
break;
}
-
+
case Expr::BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(E);
- mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
+ mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
/*Arity=*/2);
mangleExpression(BO->getLHS());
- mangleExpression(BO->getRHS());
+ mangleExpression(BO->getRHS());
break;
}
@@ -1396,7 +1407,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
mangleExpression(ECE->getSubExpr());
break;
}
-
+
case Expr::CXXOperatorCallExprClass: {
const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
unsigned NumArgs = CE->getNumArgs();
@@ -1406,7 +1417,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
mangleExpression(CE->getArg(i));
break;
}
-
+
case Expr::ParenExprClass:
mangleExpression(cast<ParenExpr>(E)->getSubExpr());
break;
@@ -1415,7 +1426,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
switch (D->getKind()) {
- default:
+ default:
// <expr-primary> ::= L <mangled-name> E # external name
Out << 'L';
mangle(D, "_Z");
@@ -1466,7 +1477,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
mangleType(FL->getType());
// TODO: avoid this copy with careful stream management.
- llvm::SmallVector<char,20> Buffer;
+ llvm::SmallString<20> Buffer;
FL->getValue().bitcastToAPInt().toString(Buffer, 16, false);
Out.write(Buffer.data(), Buffer.size());
@@ -1475,7 +1486,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
}
case Expr::IntegerLiteralClass:
- mangleIntegerLiteral(E->getType(),
+ mangleIntegerLiteral(E->getType(),
llvm::APSInt(cast<IntegerLiteral>(E)->getValue()));
break;
@@ -1521,24 +1532,27 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
}
}
-void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentList &L) {
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL) {
// <template-args> ::= I <template-arg>+ E
Out << "I";
- for (unsigned i = 0, e = L.size(); i != e; ++i)
- mangleTemplateArg(L[i]);
+ for (unsigned i = 0, e = AL.size(); i != e; ++i)
+ mangleTemplateArg(PL.getParam(i), AL[i]);
Out << "E";
}
-void CXXNameMangler::mangleTemplateArgs(const TemplateArgument *TemplateArgs,
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs) {
// <template-args> ::= I <template-arg>+ E
Out << "I";
for (unsigned i = 0; i != NumTemplateArgs; ++i)
- mangleTemplateArg(TemplateArgs[i]);
+ mangleTemplateArg(PL.getParam(i), TemplateArgs[i]);
Out << "E";
}
-void CXXNameMangler::mangleTemplateArg(const TemplateArgument &A) {
+void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
+ const TemplateArgument &A) {
// <template-arg> ::= <type> # type or template
// ::= X <expression> E # expression
// ::= <expr-primary> # simple expressions
@@ -1554,7 +1568,7 @@ void CXXNameMangler::mangleTemplateArg(const TemplateArgument &A) {
assert(A.getAsTemplate().getAsTemplateDecl() &&
"FIXME: Support dependent template names");
mangleName(A.getAsTemplate().getAsTemplateDecl());
- break;
+ break;
case TemplateArgument::Expression:
Out << 'X';
mangleExpression(A.getAsExpr());
@@ -1566,18 +1580,33 @@ void CXXNameMangler::mangleTemplateArg(const TemplateArgument &A) {
case TemplateArgument::Declaration: {
// <expr-primary> ::= L <mangled-name> E # external name
- // FIXME: Clang produces AST's where pointer-to-member-function expressions
+ // Clang produces ASTs where pointer-to-member-function expressions
// and pointer-to-function expressions are represented as a declaration not
- // an expression; this is not how gcc represents them and this changes the
- // mangling.
+ // an expression. We compensate for it here to produce the correct mangling.
+ NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
+ const NonTypeTemplateParmDecl *Parameter = cast<NonTypeTemplateParmDecl>(P);
+ bool compensateMangling = D->isCXXClassMember() &&
+ !Parameter->getType()->isReferenceType();
+ if (compensateMangling) {
+ Out << 'X';
+ mangleOperatorName(OO_Amp, 1);
+ }
+
Out << 'L';
// References to external entities use the mangled name; if the name would
 // not normally be mangled then mangle it as unqualified.
//
// FIXME: The ABI specifies that external names here should have _Z, but
// gcc leaves this off.
- mangle(cast<NamedDecl>(A.getAsDecl()), "Z");
+ if (compensateMangling)
+ mangle(D, "_Z");
+ else
+ mangle(D, "Z");
Out << 'E';
+
+ if (compensateMangling)
+ Out << 'E';
+
break;
}
}
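
The compensation above matters for non-type template arguments that are pointers to members: GCC mangles them as the expression &X::f rather than as a bare declaration, and this hunk follows suit. A small example (the mangled name is the expected GCC-compatible spelling this change targets):

    struct S { void f(); };
    template <void (S::*M)()> struct W {};

    void g(W<&S::f>) {}
    // expected: _Z1g1WIXadL_ZN1S1fEvEEE
    //   'X ... E' wraps the argument as an expression, and 'ad' is the
    //   operator& spelling emitted by mangleOperatorName(OO_Amp, 1) above.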
@@ -1689,20 +1718,20 @@ bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl *SD,
const char (&Str)[StrLen]) {
if (!SD->getIdentifier()->isStr(Str))
return false;
-
+
const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
if (TemplateArgs.size() != 2)
return false;
-
+
if (!isCharType(TemplateArgs[0].getAsType()))
return false;
-
+
if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
return false;
-
+
return true;
}
-
+
bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
// <substitution> ::= St # ::std::
if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
@@ -1769,7 +1798,7 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
Out << "So";
return true;
}
-
+
// <substitution> ::= Sd # ::std::basic_iostream<char,
// ::std::char_traits<char> >
if (isStreamCharSpecialization(SD, "basic_iostream")) {
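
These abbreviations appear directly in user-facing symbol names; for instance (expected manglings, assuming the standard substitution table):

    #include <iostream>

    void f(std::basic_ostream<char> &)  {}   // expected: _Z1fRSo
    void g(std::basic_iostream<char> &) {}   // expected: _Z1gRSd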
@@ -1838,7 +1867,7 @@ void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
 /// \brief Mangles a thunk with the offset n for the declaration D and
/// emits that name to the given output stream.
-void MangleContext::mangleThunk(const FunctionDecl *FD,
+void MangleContext::mangleThunk(const FunctionDecl *FD,
const ThunkAdjustment &ThisAdjustment,
llvm::SmallVectorImpl<char> &Res) {
assert(!isa<CXXDestructorDecl>(FD) &&
@@ -1866,7 +1895,7 @@ void MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *D,
 /// \brief Mangles a covariant thunk for the declaration D and emits that
/// name to the given output stream.
-void
+void
MangleContext::mangleCovariantThunk(const FunctionDecl *FD,
const CovariantThunkAdjustment& Adjustment,
llvm::SmallVectorImpl<char> &Res) {
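
For context, a this-adjusting thunk produced for multiple inheritance mangles with the 'Th' special name; the example below is illustrative only and assumes an 8-byte adjustment on a 64-bit target:

    struct A { virtual void f(); };
    struct B { virtual void g(); };
    struct C : A, B { void f(); void g(); };
    // The thunk that adjusts 'this' from the B subobject back to C before
    // calling C::g is expected to mangle as:
    //   _ZThn8_N1C1gEv
    // Covariant thunks use the 'Tc' prefix handled by mangleCovariantThunk.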
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index cb6a7df..59e8e77 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -268,16 +268,15 @@ llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
ASTContext &Context,
llvm::LLVMContext &VMContext) const {
- if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ if (CodeGenFunction::hasAggregateLLVMType(Ty))
return ABIArgInfo::getIndirect(0);
- } else {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
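
Standing alone, the rewritten control flow reduces to the sketch below; Kind and ABI are hypothetical stand-ins for the QualType queries and ABIArgInfo results used in the real code:

    #include <cassert>

    enum class Kind { Aggregate, Enum, PromotableInt, Other };
    enum class ABI  { Indirect, Extend, Direct };

    // Aggregates go indirect; enums decay to their underlying integer type;
    // small promotable integers are extended; everything else passes direct.
    ABI classify(Kind K, Kind UnderlyingIfEnum = Kind::Other) {
      if (K == Kind::Aggregate)
        return ABI::Indirect;
      if (K == Kind::Enum)
        K = UnderlyingIfEnum;
      return K == Kind::PromotableInt ? ABI::Extend : ABI::Direct;
    }

    int main() {
      assert(classify(Kind::Aggregate) == ABI::Indirect);
      assert(classify(Kind::Enum, Kind::PromotableInt) == ABI::Extend);
      assert(classify(Kind::Other) == ABI::Direct);
    }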
/// X86_32ABIInfo - The X86-32 ABI information.
@@ -1367,6 +1366,8 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// i8* reg_save_area;
// };
unsigned neededInt, neededSSE;
+
+ Ty = CGF.getContext().getCanonicalType(Ty);
ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
neededInt, neededSSE);
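
Canonicalizing first means the classification no longer depends on how the va_arg type is spelled; typedef sugar such as the following resolves to the same canonical type before classifyArgumentType runs (hypothetical example):

    #include <cstdarg>

    typedef double real_t;             // sugared spelling of 'double'

    double sum(int n, ...) {
      va_list ap;
      va_start(ap, n);
      double total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, real_t);   // classified exactly like 'double'
      va_end(ap);
      return total;
    }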
@@ -1596,6 +1597,80 @@ llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
+// PowerPC-32
+
+namespace {
+class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte general-purpose registers
+ for (unsigned I = 0, E = 32; I != E; ++I) {
+ llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
+ Builder.CreateStore(Four8, Slot);
+ }
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ for (unsigned I = 32, E = 64; I != E; ++I) {
+ llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
+ Builder.CreateStore(Eight8, Slot);
+ }
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75 cr0-7
+ // 76: xer
+ for (unsigned I = 64, E = 77; I != E; ++I) {
+ llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
+ Builder.CreateStore(Four8, Slot);
+ }
+
+ // 77-108: v0-31, the 16-byte vector registers
+ for (unsigned I = 77, E = 109; I != E; ++I) {
+ llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
+ Builder.CreateStore(Sixteen8, Slot);
+ }
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ for (unsigned I = 109, E = 114; I != E; ++I) {
+ llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
+ Builder.CreateStore(Four8, Slot);
+ }
+
+ return false;
+}
+
+
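
The two hooks above feed the GCC-compatible DWARF EH builtins, which unwinder support code uses to size its register save area; a minimal, illustration-only use:

    // Sketch only: the buffer covers the columns initialized above (0-113);
    // real unwinder code sizes it from the target's DWARF_FRAME_REGISTERS.
    extern "C" void probe_dwarf_tables() {
      unsigned char RegSizes[114];
      __builtin_init_dwarf_reg_size_table(RegSizes);    // filled per the loops above
      unsigned SPColumn = __builtin_dwarf_sp_column();  // 1 (r1) on PPC32, per the hook
      (void)RegSizes;
      (void)SPColumn;
    }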
// ARM ABI Implementation
namespace {
@@ -2040,6 +2115,9 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
case llvm::Triple::pic16:
return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo());
+ case llvm::Triple::ppc:
+ return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo());
+
case llvm::Triple::systemz:
return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo());