Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CGBlocks.cpp        |   26
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp       |   13
-rw-r--r--  lib/CodeGen/CGClass.cpp         |   24
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp     |  193
-rw-r--r--  lib/CodeGen/CGDebugInfo.h       |    8
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp  |   14
-rw-r--r--  lib/CodeGen/CGObjC.cpp          |    3
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp       |   25
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp       |    6
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h     |    2
-rw-r--r--  lib/CodeGen/CGVTT.cpp           |  398
-rw-r--r--  lib/CodeGen/CGVtable.cpp        |  444
-rw-r--r--  lib/CodeGen/CMakeLists.txt      |    1
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp   |    9
-rw-r--r--  lib/CodeGen/CodeGenModule.h     |    5
-rw-r--r--  lib/CodeGen/TargetABIInfo.cpp   | 1821
-rw-r--r--  lib/CodeGen/TargetInfo.cpp      |   14
-rw-r--r--  lib/CodeGen/TargetInfo.h        |    4
18 files changed, 690 insertions(+), 2320 deletions(-)
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 1fa422f..ca5b6fa 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -18,7 +18,6 @@
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>
-#include <cstdio>
using namespace clang;
using namespace CodeGen;
@@ -221,11 +220,10 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
C = llvm::ConstantStruct::get(VMContext, Elts, false);
- char Name[32];
- sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount());
C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
- llvm::GlobalValue::InternalLinkage,
- C, Name);
+ llvm::GlobalValue::InternalLinkage, C,
+ "__block_holder_tmp_" +
+ llvm::Twine(CGM.getGlobalUniqueCount()));
QualType BPT = BE->getType();
C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
return C;
@@ -747,13 +745,12 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(ResultType, Args);
- std::string Name = std::string("__") + Info.Name + "_block_invoke_";
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- Name,
+ llvm::Twine("__") + Info.Name + "_block_invoke_",
&CGM.getModule());
CGM.SetInternalFunctionAttributes(BD, Fn, FI);
@@ -875,14 +872,12 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
- std::string Name = std::string("__copy_helper_block_");
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- Name,
- &CGM.getModule());
+ "__copy_helper_block_", &CGM.getModule());
IdentifierInfo *II
= &CGM.getContext().Idents.get("__copy_helper_block_");
@@ -958,14 +953,12 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
- std::string Name = std::string("__destroy_helper_block_");
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- Name,
- &CGM.getModule());
+ "__destroy_helper_block_", &CGM.getModule());
IdentifierInfo *II
= &CGM.getContext().Idents.get("__destroy_helper_block_");
@@ -1042,7 +1035,6 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
- std::string Name = std::string("__Block_byref_id_object_copy_");
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
@@ -1050,8 +1042,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
// internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- Name,
- &CGM.getModule());
+ "__Block_byref_id_object_copy_", &CGM.getModule());
IdentifierInfo *II
= &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");
@@ -1107,7 +1098,6 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
- std::string Name = std::string("__Block_byref_id_object_dispose_");
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
@@ -1115,7 +1105,7 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
// internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- Name,
+ "__Block_byref_id_object_dispose_",
&CGM.getModule());
IdentifierInfo *II
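
Annotation: the CGBlocks changes above drop fixed-size sprintf buffers and std::string temporaries in favor of llvm::Twine, which concatenates the name lazily inside the GlobalVariable/Function constructors. A minimal sketch of the pattern, assuming a module and initializer are already in hand (makeBlockHolder and UniqueCount are illustrative names, not part of the patch):

// Sketch: Twine-based naming; no intermediate buffer is materialized
// until the constructor renders the name once.
#include "llvm/ADT/Twine.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Module.h"

static llvm::GlobalVariable *makeBlockHolder(llvm::Module &M,
                                             llvm::Constant *Init,
                                             unsigned UniqueCount) {
  return new llvm::GlobalVariable(M, Init->getType(), /*isConstant=*/true,
                                  llvm::GlobalValue::InternalLinkage, Init,
                                  "__block_holder_tmp_" +
                                      llvm::Twine(UniqueCount));
}
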
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 866c587..f11d52e 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -72,6 +72,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
switch (BuiltinID) {
default: break; // Handle intrinsics and libm functions below.
case Builtin::BI__builtin___CFStringMakeConstantString:
+ case Builtin::BI__builtin___NSStringMakeConstantString:
return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
@@ -556,6 +557,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
+ case Builtin::BI__builtin_llvm_memory_barrier: {
+ Value *C[5] = {
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)),
+ EmitScalarExpr(E->getArg(4))
+ };
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+ return RValue::get(0);
+ }
+
// Library functions with special handling.
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
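
Annotation: the new memory-barrier case forwards all five scalar arguments straight to the llvm.memory.barrier intrinsic. A hedged source-level sketch of a full fence via the builtin; the flag order (load-load, load-store, store-load, store-store, device) is an assumption based on that intrinsic's documented signature:

// Sketch: full fence through the builtin added above.
void full_fence(void) {
  __builtin_llvm_memory_barrier(1, 1, 1, 1, 1);
}
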
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index ab3fece..a822ca2 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -394,10 +394,8 @@ void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
if (BitwiseAssign)
EmitAggregateCopy(Dest, Src, Ty);
else {
- bool hasCopyAssign = BaseClassDecl->hasConstCopyAssignment(getContext(),
- MD);
- assert(hasCopyAssign && "EmitClassAggrCopyAssignment - No user assign");
- (void)hasCopyAssign;
+ BaseClassDecl->hasConstCopyAssignment(getContext(), MD);
+ assert(MD && "EmitClassAggrCopyAssignment - No user assign");
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
const llvm::Type *LTy =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
@@ -410,8 +408,10 @@ void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
MD->getThisType(getContext())));
// Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- MD->getParamDecl(0)->getType()));
+ QualType SrcTy = MD->getParamDecl(0)->getType();
+ RValue SrcValue = SrcTy->isReferenceType() ? RValue::get(Src) :
+ RValue::getAggregate(Src);
+ CallArgs.push_back(std::make_pair(SrcValue, SrcTy));
QualType ResultType = MD->getType()->getAs<FunctionType>()->getResultType();
EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
Callee, ReturnValueSlot(), CallArgs, MD);
@@ -531,10 +531,8 @@ void CodeGenFunction::EmitClassCopyAssignment(
}
const CXXMethodDecl *MD = 0;
- bool ConstCopyAssignOp = BaseClassDecl->hasConstCopyAssignment(getContext(),
- MD);
- assert(ConstCopyAssignOp && "EmitClassCopyAssignment - missing copy assign");
- (void)ConstCopyAssignOp;
+ BaseClassDecl->hasConstCopyAssignment(getContext(), MD);
+ assert(MD && "EmitClassCopyAssignment - missing copy assign");
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
const llvm::Type *LTy =
@@ -548,8 +546,10 @@ void CodeGenFunction::EmitClassCopyAssignment(
MD->getThisType(getContext())));
// Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- MD->getParamDecl(0)->getType()));
+ QualType SrcTy = MD->getParamDecl(0)->getType();
+ RValue SrcValue = SrcTy->isReferenceType() ? RValue::get(Src) :
+ RValue::getAggregate(Src);
+ CallArgs.push_back(std::make_pair(SrcValue, SrcTy));
QualType ResultType =
MD->getType()->getAs<FunctionType>()->getResultType();
EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
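
Annotation: the CGClass change fixes the argument passed for the copy-assignment operand. When the operator's parameter is a reference, Src is a simple pointer RValue, but a by-value parameter must be passed as an aggregate. The two shapes being distinguished, sketched with hypothetical types:

struct ByRef {
  ByRef &operator=(const ByRef &rhs);  // SrcTy->isReferenceType(): pass
};                                     // RValue::get(Src)
struct ByVal {
  ByVal &operator=(ByVal rhs);         // class-typed parameter: pass
};                                     // RValue::getAggregate(Src)
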
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index ab8f663..1ffad3e 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -460,58 +460,15 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
return DbgTy;
}
-/// CreateType - get structure or union type.
-llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
- llvm::DICompileUnit Unit) {
- RecordDecl *Decl = Ty->getDecl();
-
- unsigned Tag;
- if (Decl->isStruct())
- Tag = llvm::dwarf::DW_TAG_structure_type;
- else if (Decl->isUnion())
- Tag = llvm::dwarf::DW_TAG_union_type;
- else {
- assert(Decl->isClass() && "Unknown RecordType!");
- Tag = llvm::dwarf::DW_TAG_class_type;
- }
-
+/// CollectRecordFields - A helper function to collect debug info for
+/// record fields. This is used while creating a debug info entry for a Record.
+void CGDebugInfo::
+CollectRecordFields(const RecordDecl *Decl,
+ llvm::DICompileUnit Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+ unsigned FieldNo = 0;
SourceManager &SM = CGM.getContext().getSourceManager();
-
- // Get overall information about the record type for the debug info.
- PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
- llvm::DICompileUnit DefUnit;
- unsigned Line = 0;
- if (!PLoc.isInvalid()) {
- DefUnit = getOrCreateCompileUnit(Decl->getLocation());
- Line = PLoc.getLine();
- }
-
- // Records and classes and unions can all be recursive. To handle them, we
- // first generate a debug descriptor for the struct as a forward declaration.
- // Then (if it is a definition) we go through and get debug info for all of
- // its members. Finally, we create a descriptor for the complete type (which
- // may refer to the forward decl if the struct is recursive) and replace all
- // uses of the forward declaration with the final definition.
- llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(Tag, Unit, Decl->getName(),
- DefUnit, Line, 0, 0, 0, 0,
- llvm::DIType(), llvm::DIArray());
-
- // If this is just a forward declaration, return it.
- if (!Decl->getDefinition(CGM.getContext()))
- return FwdDecl;
-
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode = FwdDecl.getNode();
- // Otherwise, insert it into the TypeCache so that recursive uses will find
- // it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
-
- // Convert all the elements.
- llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
-
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(Decl);
-
- unsigned FieldNo = 0;
for (RecordDecl::field_iterator I = Decl->field_begin(),
E = Decl->field_end();
I != E; ++I, ++FieldNo) {
@@ -560,6 +517,125 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
}
+}
+
+/// CollectCXXMemberFunctions - A helper function to collect debug info for
+/// C++ member functions. This is used while creating a debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
+ llvm::DICompileUnit Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DICompositeType &RecordTy) {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ for(CXXRecordDecl::method_iterator I = Decl->method_begin(),
+ E = Decl->method_end(); I != E; ++I) {
+ CXXMethodDecl *Method = *I;
+ llvm::StringRef MethodName;
+ llvm::StringRef MethodLinkageName;
+ llvm::DIType MethodTy = getOrCreateType(Method->getType(), Unit);
+ if (CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(Method)) {
+ if (CDecl->isImplicit())
+ continue;
+ MethodName = Decl->getName();
+ // FIXME : Find linkage name.
+ } else if (CXXDestructorDecl *DDecl = dyn_cast<CXXDestructorDecl>(Method)) {
+ if (DDecl->isImplicit())
+ continue;
+ MethodName = getFunctionName(Method);
+ // FIXME : Find linkage name.
+ } else {
+ if (Method->isImplicit())
+ continue;
+ // regular method
+ MethodName = getFunctionName(Method);
+ MethodLinkageName = CGM.getMangledName(Method);
+ }
+
+ // Get the location for the method.
+ SourceLocation MethodDefLoc = Method->getLocation();
+ PresumedLoc PLoc = SM.getPresumedLoc(MethodDefLoc);
+ llvm::DICompileUnit MethodDefUnit;
+ unsigned MethodLine = 0;
+
+ if (!PLoc.isInvalid()) {
+ MethodDefUnit = getOrCreateCompileUnit(MethodDefLoc);
+ MethodLine = PLoc.getLine();
+ }
+
+ llvm::DISubprogram SP =
+ DebugFactory.CreateSubprogram(RecordTy , MethodName, MethodName,
+ MethodLinkageName,
+ MethodDefUnit, MethodLine,
+ MethodTy, false,
+ Method->isThisDeclarationADefinition(),
+ 0 /*Virtuality*/, 0 /*VIndex*/,
+ llvm::DIType() /*ContainingType*/);
+ if (Method->isThisDeclarationADefinition())
+ SPCache[cast<FunctionDecl>(Method)] = llvm::WeakVH(SP.getNode());
+ EltTys.push_back(SP);
+ }
+}
+
+/// CreateType - get structure or union type.
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
+ llvm::DICompileUnit Unit) {
+ RecordDecl *Decl = Ty->getDecl();
+
+ unsigned Tag;
+ if (Decl->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (Decl->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else {
+ assert(Decl->isClass() && "Unknown RecordType!");
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+
+ // Get overall information about the record type for the debug info.
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ llvm::DICompileUnit DefUnit;
+ unsigned Line = 0;
+ if (!PLoc.isInvalid()) {
+ DefUnit = getOrCreateCompileUnit(Decl->getLocation());
+ Line = PLoc.getLine();
+ }
+
+ // Records and classes and unions can all be recursive. To handle them, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+
+ // A Decl->getName() is not unique. However, the debug info descriptors
+ // are uniqued, so the descriptor describing the record's context is
+ // needed to keep the descriptors of two Decls distinct when their
+ // names match.
+ // FIXME: Use the RecordDecl's DeclContext's descriptor. As a temporary
+ // step, use the type's name in FwdDecl.
+ std::string STy = QualType(Ty, 0).getAsString();
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, STy.c_str(),
+ DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray());
+
+ // If this is just a forward declaration, return it.
+ if (!Decl->getDefinition(CGM.getContext()))
+ return FwdDecl;
+
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode = FwdDecl.getNode();
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ CollectRecordFields(Decl, Unit, EltTys);
+ if (CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(Decl))
+ CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
llvm::DIArray Elements =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
@@ -1000,18 +1076,27 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
const Decl *D = GD.getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // If there is a DISubprogram for this function available then use it.
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ FI = SPCache.find(FD);
+ if (FI != SPCache.end()) {
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
+ if (!SP.isNull() && SP.isSubprogram() && SP.isDefinition()) {
+ RegionStack.push_back(SP.getNode());
+ return;
+ }
+ }
Name = getFunctionName(FD);
- if (Name[0] == '\01')
+ if (!Name.empty() && Name[0] == '\01')
Name = Name.substr(1);
// Use mangled name as linkage name for c/c++ functions.
LinkageName = CGM.getMangledName(GD);
} else {
// Use llvm function name as linkage name.
Name = Fn->getName();
- // Skip the asm prefix if it exists.
- if (Name[0] == '\01')
- Name = Name.substr(1);
LinkageName = Name;
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
}
// It is expected that CurLoc is set before using EmitFunctionStart.
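
Annotation: the new SPCache lets EmitFunctionStart reuse the DISubprogram minted by CollectCXXMemberFunctions instead of creating a second, competing descriptor for the same member function. The value is held through an llvm::WeakVH, so a deleted node reads back as null rather than dangling; hence the dyn_cast_or_null plus isNull/isDefinition checks before trusting it. A compressed sketch of the lookup idiom:

// Sketch: consult the cache first; only a live, defined subprogram
// node is reused, otherwise a fresh descriptor gets built.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
    FI = SPCache.find(FD);
if (FI != SPCache.end()) {
  llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
  if (!SP.isNull() && SP.isSubprogram() && SP.isDefinition())
    RegionStack.push_back(SP.getNode());  // reuse the cached descriptor
}
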
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 8e88988..fddd23b 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -64,6 +64,8 @@ class CGDebugInfo {
/// constructed on demand. For example, C++ destructors, C++ operators etc..
llvm::BumpPtrAllocator FunctionNames;
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+
/// Helper functions for getOrCreateType.
llvm::DIType CreateType(const BuiltinType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const ComplexType *Ty, llvm::DICompileUnit U);
@@ -85,6 +87,12 @@ class CGDebugInfo {
llvm::DIType CreatePointerLikeType(unsigned Tag,
const Type *Ty, QualType PointeeTy,
llvm::DICompileUnit U);
+ void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
+ llvm::DICompileUnit U,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &E,
+ llvm::DICompositeType &T);
+ void CollectRecordFields(const RecordDecl *Decl, llvm::DICompileUnit U,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &E);
public:
CGDebugInfo(CodeGenModule &CGM);
~CGDebugInfo();
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index dec06e2..7d5b3da 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -742,7 +742,8 @@ public:
return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
case Expr::ObjCStringLiteralClass: {
ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
- llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(SL->getString());
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
case Expr::PredefinedExprClass: {
@@ -764,11 +765,18 @@ public:
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
- if (CE->isBuiltinCall(CGM.getContext()) !=
- Builtin::BI__builtin___CFStringMakeConstantString)
+ unsigned builtin = CE->isBuiltinCall(CGM.getContext());
+ if (builtin !=
+ Builtin::BI__builtin___CFStringMakeConstantString &&
+ builtin !=
+ Builtin::BI__builtin___NSStringMakeConstantString)
break;
const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
const StringLiteral *Literal = cast<StringLiteral>(Arg);
+ if (builtin ==
+ Builtin::BI__builtin___NSStringMakeConstantString) {
+ return CGM.getObjCRuntime().GenerateConstantString(Literal);
+ }
// FIXME: need to deal with UCN conversion issues.
return CGM.GetAddrOfConstantCFString(Literal);
}
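
Annotation: with this change, __builtin___NSStringMakeConstantString is constant-folded through the Objective-C runtime's string emitter, while the CFString builtin keeps its dedicated path; both still require a string literal argument. A hypothetical source-level use:

// Sketch: both builtins fold to compile-time constant string objects;
// only the NSString variant dispatches through GenerateConstantString.
const void *cf = __builtin___CFStringMakeConstantString("cf literal");
const void *ns = __builtin___NSStringMakeConstantString("ns literal");
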
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index ac391d9..896d220 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -26,7 +26,8 @@ using namespace CodeGen;
/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
- llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(E);
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(E->getString());
// FIXME: This bitcast should just be made an invariant on the Runtime.
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index e7a2093..77be9fb 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -124,7 +124,7 @@ private:
void EmitClassRef(const std::string &className);
public:
CGObjCGNU(CodeGen::CodeGenModule &cgm);
- virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *);
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
virtual CodeGen::RValue
GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
@@ -240,15 +240,23 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
Zeros[1] = Zeros[0];
NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty);
// Get the selector Type.
- SelectorTy = cast<llvm::PointerType>(
- CGM.getTypes().ConvertType(CGM.getContext().getObjCSelType()));
+ QualType selTy = CGM.getContext().getObjCSelType();
+ if (QualType() == selTy) {
+ SelectorTy = PtrToInt8Ty;
+ } else {
+ SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
+ }
PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
PtrTy = PtrToInt8Ty;
// Object type
ASTIdTy = CGM.getContext().getObjCIdType();
- IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ if (QualType() == ASTIdTy) {
+ IdTy = PtrToInt8Ty;
+ } else {
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ }
// IMP type
std::vector<const llvm::Type*> IMPArgs;
@@ -348,12 +356,9 @@ llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
}
/// Generate an NSConstantString object.
-//TODO: In case there are any crazy people still using the GNU runtime without
-//an OpenStep implementation, this should let them select their own class for
-//constant strings.
-llvm::Constant *CGObjCGNU::GenerateConstantString(const ObjCStringLiteral *SL) {
- std::string Str(SL->getString()->getStrData(),
- SL->getString()->getByteLength());
+llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+ std::string Str(SL->getStrData(), SL->getByteLength());
+
std::vector<llvm::Constant*> Ivars;
Ivars.push_back(NULLPtr);
Ivars.push_back(MakeConstantString(Str));
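
Annotation: the CGObjCGNU constructor changes guard against a null QualType when nothing has registered SEL/id with the ASTContext (possible now that the runtime is created lazily; see the CodeGenModule change below), falling back to i8*. An equivalent formulation of the guard using QualType::isNull():

// Sketch: tolerate a missing selector type by defaulting to i8*.
QualType selTy = CGM.getContext().getObjCSelType();
SelectorTy = selTy.isNull()
                 ? PtrToInt8Ty
                 : cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
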
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 727746f..137ea51 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -952,7 +952,7 @@ public:
CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
CGM(cgm), VMContext(cgm.getLLVMContext()) { }
- virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *SL);
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);
virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD=0);
@@ -1454,8 +1454,8 @@ llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*/
llvm::Constant *CGObjCCommonMac::GenerateConstantString(
- const ObjCStringLiteral *SL) {
- return CGM.GetAddrOfConstantCFString(SL->getString());
+ const StringLiteral *SL) {
+ return CGM.GetAddrOfConstantCFString(SL);
}
/// Generates a message send where the super is the receiver. This is
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 6b45562..ff5d40b 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -106,7 +106,7 @@ public:
const ObjCMethodDecl *Method) = 0;
/// Generate a constant string object.
- virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *) = 0;
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
/// Generate a category. A category contains a list of methods (and
/// accompanying metadata) and a list of protocols.
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
new file mode 100644
index 0000000..9714bd9
--- /dev/null
+++ b/lib/CodeGen/CGVTT.cpp
@@ -0,0 +1,398 @@
+//===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of VTTs (vtable tables).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "clang/AST/RecordLayout.h"
+using namespace clang;
+using namespace CodeGen;
+
+#define D1(x)
+
+namespace {
+class VTTBuilder {
+ /// Inits - The list of values built for the VTT.
+ std::vector<llvm::Constant *> &Inits;
+ /// Class - The most derived class that this vtable is being built for.
+ const CXXRecordDecl *Class;
+ CodeGenModule &CGM; // Per-module state.
+ llvm::SmallSet<const CXXRecordDecl *, 32> SeenVBase;
+ /// BLayout - Layout for the most derived class that this vtable is being
+ /// built for.
+ const ASTRecordLayout &BLayout;
+ CGVtableInfo::AddrMap_t &AddressPoints;
+ // vtbl - A pointer to the vtable for Class.
+ llvm::Constant *ClassVtbl;
+ llvm::LLVMContext &VMContext;
+
+ /// SeenVBasesInSecondary - The seen virtual bases when building the
+ /// secondary virtual pointers.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 32> SeenVBasesInSecondary;
+
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies;
+
+ bool GenerateDefinition;
+
+ llvm::DenseMap<BaseSubobject, llvm::Constant *> CtorVtables;
+ llvm::DenseMap<std::pair<const CXXRecordDecl *, BaseSubobject>, uint64_t>
+ CtorVtableAddressPoints;
+
+ llvm::Constant *getCtorVtable(const BaseSubobject &Base) {
+ if (!GenerateDefinition)
+ return 0;
+
+ llvm::Constant *&CtorVtable = CtorVtables[Base];
+ if (!CtorVtable) {
+ // Build the vtable.
+ CGVtableInfo::CtorVtableInfo Info
+ = CGM.getVtableInfo().getCtorVtable(Class, Base);
+
+ CtorVtable = Info.Vtable;
+
+ // Add the address points for this base.
+ for (CGVtableInfo::AddressPointsMapTy::const_iterator I =
+ Info.AddressPoints.begin(), E = Info.AddressPoints.end();
+ I != E; ++I) {
+ uint64_t &AddressPoint =
+ CtorVtableAddressPoints[std::make_pair(Base.getBase(), I->first)];
+
+ // Check if we already have the address points for this base.
+ if (AddressPoint)
+ break;
+
+ // Otherwise, insert it.
+ AddressPoint = I->second;
+ }
+ }
+
+ return CtorVtable;
+ }
+
+
+ /// BuildVtablePtr - Build up a reference to the given secondary vtable.
+ llvm::Constant *BuildVtablePtr(llvm::Constant *Vtable,
+ const CXXRecordDecl *VtableClass,
+ const CXXRecordDecl *RD,
+ uint64_t Offset) {
+ if (!GenerateDefinition)
+ return 0;
+
+ uint64_t AddressPoint;
+
+ if (VtableClass != Class) {
+ // We have a ctor vtable, look for the address point in the ctor vtable
+ // address points.
+ AddressPoint =
+ CtorVtableAddressPoints[std::make_pair(VtableClass,
+ BaseSubobject(RD, Offset))];
+ } else {
+ AddressPoint =
+ (*AddressPoints[VtableClass])[std::make_pair(RD, Offset)];
+ }
+
+ // FIXME: We can never have 0 address point. Do this for now so gepping
+ // retains the same structure. Later we'll just assert.
+ if (AddressPoint == 0)
+ AddressPoint = 1;
+ D1(printf("XXX address point for %s in %s layout %s at offset %d was %d\n",
+ RD->getNameAsCString(), VtblClass->getNameAsCString(),
+ Class->getNameAsCString(), (int)Offset, (int)AddressPoint));
+
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), 0),
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), AddressPoint)
+ };
+
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(Vtable, Idxs, 2);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ return llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ }
+
+ /// Secondary - Add the secondary vtable pointers to Inits. Offset is the
+ /// current offset in bits to the object we're working on.
+ void Secondary(const CXXRecordDecl *RD, llvm::Constant *vtbl,
+ const CXXRecordDecl *VtblClass, uint64_t Offset=0,
+ bool MorallyVirtual=false) {
+ if (RD->getNumVBases() == 0 && ! MorallyVirtual)
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+ e = RD->bases_end(); i != e; ++i) {
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // We only want to visit each virtual base once.
+ if (i->isVirtual() && SeenVBasesInSecondary.count(Base))
+ continue;
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers are present for all bases with either
+ // virtual bases or virtual function declarations overridden along a
+ // virtual path.
+ //
+ // If the base class is not dynamic, we don't want to add it, nor any
+ // of its base classes.
+ if (!Base->isDynamicClass())
+ continue;
+
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
+ bool NonVirtualPrimaryBase;
+ NonVirtualPrimaryBase = !PrimaryBaseWasVirtual && Base == PrimaryBase;
+ bool BaseMorallyVirtual = MorallyVirtual | i->isVirtual();
+ uint64_t BaseOffset;
+ if (!i->isVirtual()) {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ BaseOffset = Offset + Layout.getBaseClassOffset(Base);
+ } else
+ BaseOffset = BLayout.getVBaseClassOffset(Base);
+ llvm::Constant *subvtbl = vtbl;
+ const CXXRecordDecl *subVtblClass = VtblClass;
+ if ((Base->getNumVBases() || BaseMorallyVirtual)
+ && !NonVirtualPrimaryBase) {
+ llvm::Constant *init;
+ if (BaseMorallyVirtual || VtblClass == Class)
+ init = BuildVtablePtr(vtbl, VtblClass, Base, BaseOffset);
+ else {
+ init = getCtorVtable(BaseSubobject(Base, BaseOffset));
+
+ subvtbl = init;
+ subVtblClass = Base;
+
+ init = BuildVtablePtr(init, Class, Base, BaseOffset);
+ }
+
+ Inits.push_back(init);
+ }
+
+ if (i->isVirtual())
+ SeenVBasesInSecondary.insert(Base);
+
+ Secondary(Base, subvtbl, subVtblClass, BaseOffset, BaseMorallyVirtual);
+ }
+ }
+
+ /// BuildVTT - Add the VTT to Inits. Offset is the offset in bits to the
+ /// current object we're working on.
+ void BuildVTT(const CXXRecordDecl *RD, uint64_t Offset, bool MorallyVirtual) {
+ // Itanium C++ ABI 2.6.2:
+ // An array of virtual table addresses, called the VTT, is declared for
+ // each class type that has indirect or direct virtual base classes.
+ if (RD->getNumVBases() == 0)
+ return;
+
+ // Remember the sub-VTT index.
+ SubVTTIndicies[RD] = Inits.size();
+
+ llvm::Constant *Vtable;
+ const CXXRecordDecl *VtableClass;
+
+ // First comes the primary virtual table pointer...
+ if (MorallyVirtual) {
+ Vtable = ClassVtbl;
+ VtableClass = Class;
+ } else {
+ Vtable = getCtorVtable(BaseSubobject(RD, Offset));
+ VtableClass = RD;
+ }
+
+ llvm::Constant *Init = BuildVtablePtr(Vtable, VtableClass, RD, Offset);
+ Inits.push_back(Init);
+
+ // then the secondary VTTs....
+ SecondaryVTTs(RD, Offset, MorallyVirtual);
+
+ // Make sure to clear the set of seen virtual bases.
+ SeenVBasesInSecondary.clear();
+
+ // and last the secondary vtable pointers.
+ Secondary(RD, Vtable, VtableClass, Offset, MorallyVirtual);
+ }
+
+ /// SecondaryVTTs - Add the secondary VTTs to Inits. The secondary VTTs are
+ /// built from each direct non-virtual proper base that requires a VTT in
+ /// declaration order.
+ void SecondaryVTTs(const CXXRecordDecl *RD, uint64_t Offset=0,
+ bool MorallyVirtual=false) {
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+ e = RD->bases_end(); i != e; ++i) {
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+ if (i->isVirtual())
+ continue;
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base);
+
+ BuildVTT(Base, BaseOffset, MorallyVirtual);
+ }
+ }
+
+ /// VirtualVTTs - Add the VTT for each proper virtual base in inheritance
+ /// graph preorder.
+ void VirtualVTTs(const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+ e = RD->bases_end(); i != e; ++i) {
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+ if (i->isVirtual() && !SeenVBase.count(Base)) {
+ SeenVBase.insert(Base);
+ uint64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
+ BuildVTT(Base, BaseOffset, false);
+ }
+ VirtualVTTs(Base);
+ }
+ }
+
+public:
+ VTTBuilder(std::vector<llvm::Constant *> &inits, const CXXRecordDecl *c,
+ CodeGenModule &cgm, bool GenerateDefinition)
+ : Inits(inits), Class(c), CGM(cgm),
+ BLayout(cgm.getContext().getASTRecordLayout(c)),
+ AddressPoints(*cgm.getVtableInfo().AddressPoints[c]),
+ VMContext(cgm.getModule().getContext()),
+ GenerateDefinition(GenerateDefinition) {
+
+ // First comes the primary virtual table pointer for the complete class...
+ ClassVtbl = GenerateDefinition ? CGM.getVtableInfo().getVtable(Class) : 0;
+
+ llvm::Constant *Init = BuildVtablePtr(ClassVtbl, Class, Class, 0);
+ Inits.push_back(Init);
+
+ // then the secondary VTTs...
+ SecondaryVTTs(Class);
+
+ // Make sure to clear the set of seen virtual bases.
+ SeenVBasesInSecondary.clear();
+
+ // then the secondary vtable pointers...
+ Secondary(Class, ClassVtbl, Class);
+
+ // and last, the virtual VTTs.
+ VirtualVTTs(Class);
+ }
+
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getSubVTTIndicies() {
+ return SubVTTIndicies;
+ }
+};
+}
+
+llvm::GlobalVariable *
+CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+ bool GenerateDefinition,
+ const CXXRecordDecl *RD) {
+ // Only classes that have virtual bases need a VTT.
+ if (RD->getNumVBases() == 0)
+ return 0;
+
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXVTT(RD, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ D1(printf("vtt %s\n", RD->getNameAsCString()));
+
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV == 0 || GV->isDeclaration()) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+
+ std::vector<llvm::Constant *> inits;
+ VTTBuilder b(inits, RD, CGM, GenerateDefinition);
+
+ const llvm::ArrayType *Type = llvm::ArrayType::get(Int8PtrTy, inits.size());
+ llvm::Constant *Init = 0;
+ if (GenerateDefinition)
+ Init = llvm::ConstantArray::get(Type, inits);
+
+ llvm::GlobalVariable *OldGV = GV;
+ GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
+ Linkage, Init, Name);
+ CGM.setGlobalVisibility(GV, RD);
+
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+ }
+
+ return GV;
+}
+
+CGVtableInfo::CtorVtableInfo
+CGVtableInfo::getCtorVtable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base) {
+ CtorVtableInfo Info;
+
+ Info.Vtable = GenerateVtable(llvm::GlobalValue::InternalLinkage,
+ /*GenerateDefinition=*/true,
+ RD, Base.getBase(), Base.getBaseOffset(),
+ Info.AddressPoints);
+ return Info;
+}
+
+llvm::GlobalVariable *CGVtableInfo::getVTT(const CXXRecordDecl *RD) {
+ return GenerateVTT(llvm::GlobalValue::ExternalLinkage,
+ /*GenerateDefinition=*/false, RD);
+
+}
+
+
+bool CGVtableInfo::needsVTTParameter(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // We don't have any virtual bases, just return early.
+ if (!MD->getParent()->getNumVBases())
+ return false;
+
+ // Check if we have a base constructor.
+ if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+ return true;
+
+ // Check if we have a base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return true;
+
+ return false;
+}
+
+uint64_t CGVtableInfo::getSubVTTIndex(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Base) {
+ ClassPairTy ClassPair(RD, Base);
+
+ SubVTTIndiciesTy::iterator I =
+ SubVTTIndicies.find(ClassPair);
+ if (I != SubVTTIndicies.end())
+ return I->second;
+
+ std::vector<llvm::Constant *> inits;
+ VTTBuilder Builder(inits, RD, CGM, /*GenerateDefinition=*/false);
+
+ for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+ Builder.getSubVTTIndicies().begin(),
+ E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ // Insert all indices.
+ ClassPairTy ClassPair(RD, I->first);
+
+ SubVTTIndicies.insert(std::make_pair(ClassPair, I->second));
+ }
+
+ I = SubVTTIndicies.find(ClassPair);
+ assert(I != SubVTTIndicies.end() && "Did not find index!");
+
+ return I->second;
+}
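
Annotation: for orientation, this is the structure VTTBuilder emits, shown on a hypothetical hierarchy. Per Itanium C++ ABI 2.6.2, which the comments above cite, the order is: primary virtual pointer, secondary VTTs for direct non-virtual bases, secondary virtual pointers, then VTTs for virtual bases.

// Hypothetical hierarchy: D has virtual bases, so it gets a VTT.
struct V { virtual ~V(); };
struct A : virtual V { };
struct B : virtual V { };
struct D : A, B { };
//
// VTT for D, conceptually:
//   [0]  primary virtual pointer for the complete object D
//   [1]  sub-VTT for A-in-D (address point of A's construction vtable)
//   [2]  sub-VTT for B-in-D
//   [3+] secondary virtual pointers for D's dynamic bases
// Base-object constructors/destructors of such classes receive this
// array via the implicit VTT parameter (see needsVTTParameter above).
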
diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp
index ae900d6..e5abfc6 100644
--- a/lib/CodeGen/CGVtable.cpp
+++ b/lib/CodeGen/CGVtable.cpp
@@ -232,8 +232,8 @@ public:
return llvm::ConstantExpr::getBitCast(m, Ptr8Ty);
}
-#define D1(x)
-//#define D1(X) do { if (getenv("DEBUG")) { X; } } while (0)
+//#define D1(x)
+#define D1(X) do { if (getenv("DEBUG")) { X; } } while (0)
void GenerateVBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
bool updateVBIndex, Index_t current_vbindex) {
@@ -254,7 +254,7 @@ public:
D1(printf(" vbase for %s at %d delta %d most derived %s\n",
Base->getNameAsCString(),
(int)-VCalls.size()-3, (int)BaseOffset,
- Class->getNameAsCString()));
+ MostDerivedClass->getNameAsCString()));
}
// We also record offsets for non-virtual bases to closest enclosing
// virtual base. We do this so that we don't have to search
@@ -376,13 +376,14 @@ public:
CurrentVBaseOffset))
return;
+ D1(printf(" vfn for %s at %d\n",
+ dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsCString(),
+ (int)Methods.size()));
+
// We didn't find an entry in the vtable that we could use, add a new
// entry.
Methods.AddMethod(GD);
- D1(printf(" vfn for %s at %d\n", MD->getNameAsString().c_str(),
- (int)Index[GD]));
-
VCallOffset[GD] = Offset/8;
if (MorallyVirtual) {
Index_t &idx = VCall[GD];
@@ -392,7 +393,8 @@ public:
idx = VCalls.size()+1;
VCalls.push_back(0);
D1(printf(" vcall for %s at %d with delta %d\n",
- MD->getNameAsString().c_str(), (int)-VCalls.size()-3, 0));
+ dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsCString(),
+ (int)-VCalls.size()-3, 0));
}
}
}
@@ -429,12 +431,11 @@ public:
continue;
const CXXRecordDecl *Base =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- if (Base != PrimaryBase || PrimaryBaseWasVirtual) {
- uint64_t o = Offset + Layout.getBaseClassOffset(Base);
- StartNewTable();
- GenerateVtableForBase(Base, o, MorallyVirtual, false,
- CurrentVBaseOffset, Path);
- }
+ uint64_t o = Offset + Layout.getBaseClassOffset(Base);
+ StartNewTable();
+ GenerateVtableForBase(Base, o, MorallyVirtual, false,
+ true, Base == PrimaryBase && !PrimaryBaseWasVirtual,
+ CurrentVBaseOffset, Path);
}
Path->pop_back();
}
@@ -463,7 +464,7 @@ public:
void AddAddressPoints(const CXXRecordDecl *RD, uint64_t Offset,
Index_t AddressPoint) {
D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n",
- RD->getNameAsCString(), Class->getNameAsCString(),
+ RD->getNameAsCString(), MostDerivedClass->getNameAsCString(),
LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint));
subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint;
AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint;
@@ -480,7 +481,7 @@ public:
BLayout.getVBaseClassOffset(RD) != Offset)
break;
D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n",
- RD->getNameAsCString(), Class->getNameAsCString(),
+ RD->getNameAsCString(), MostDerivedClass->getNameAsCString(),
LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint));
subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint;
AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint;
@@ -491,6 +492,7 @@ public:
void FinishGenerateVtable(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout,
const CXXRecordDecl *PrimaryBase,
+ bool ForNPNVBases, bool WasPrimaryBase,
bool PrimaryBaseWasVirtual,
bool MorallyVirtual, int64_t Offset,
bool ForVirtualBase, int64_t CurrentVBaseOffset,
@@ -503,23 +505,27 @@ public:
StartNewTable();
extra = 0;
- bool DeferVCalls = MorallyVirtual || ForVirtualBase;
- int VCallInsertionPoint = VtableComponents.size();
- if (!DeferVCalls) {
- insertVCalls(VCallInsertionPoint);
- } else
- // FIXME: just for extra, or for all uses of VCalls.size post this?
- extra = -VCalls.size();
+ Index_t AddressPoint = 0;
+ int VCallInsertionPoint = 0;
+ if (!ForNPNVBases || !WasPrimaryBase) {
+ bool DeferVCalls = MorallyVirtual || ForVirtualBase;
+ VCallInsertionPoint = VtableComponents.size();
+ if (!DeferVCalls) {
+ insertVCalls(VCallInsertionPoint);
+ } else
+ // FIXME: just for extra, or for all uses of VCalls.size post this?
+ extra = -VCalls.size();
- // Add the offset to top.
- VtableComponents.push_back(BuildVtable ? wrap(-((Offset-LayoutOffset)/8)) : 0);
+ // Add the offset to top.
+ VtableComponents.push_back(BuildVtable ? wrap(-((Offset-LayoutOffset)/8)) : 0);
- // Add the RTTI information.
- VtableComponents.push_back(rtti);
+ // Add the RTTI information.
+ VtableComponents.push_back(rtti);
- Index_t AddressPoint = VtableComponents.size();
+ AddressPoint = VtableComponents.size();
- AppendMethodsToVtable();
+ AppendMethodsToVtable();
+ }
// and then the non-virtual bases.
NonVirtualBases(RD, Layout, PrimaryBase, PrimaryBaseWasVirtual,
@@ -537,7 +543,8 @@ public:
insertVCalls(VCallInsertionPoint);
}
- AddAddressPoints(RD, Offset, AddressPoint);
+ if (!ForNPNVBases || !WasPrimaryBase)
+ AddAddressPoints(RD, Offset, AddressPoint);
if (alloc) {
delete Path;
@@ -557,13 +564,13 @@ public:
// vtables are composed from the chain of primaries.
if (PrimaryBase && !PrimaryBaseWasVirtual) {
D1(printf(" doing primaries for %s most derived %s\n",
- RD->getNameAsCString(), Class->getNameAsCString()));
+ RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
Primaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset,
updateVBIndex, current_vbindex, CurrentVBaseOffset);
}
D1(printf(" doing vcall entries for %s most derived %s\n",
- RD->getNameAsCString(), Class->getNameAsCString()));
+ RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
// And add the virtuals for the class to the primary vtable.
AddMethods(RD, MorallyVirtual, Offset, CurrentVBaseOffset);
@@ -589,7 +596,7 @@ public:
}
D1(printf(" doing primaries for %s most derived %s\n",
- RD->getNameAsCString(), Class->getNameAsCString()));
+ RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
VBPrimaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset,
updateVBIndex, current_vbindex, PrimaryBaseWasVirtual,
@@ -597,7 +604,7 @@ public:
}
D1(printf(" doing vbase entries for %s most derived %s\n",
- RD->getNameAsCString(), Class->getNameAsCString()));
+ RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
GenerateVBaseOffsets(RD, Offset, updateVBIndex, current_vbindex);
if (RDisVirtualBase || bottom) {
@@ -609,6 +616,8 @@ public:
void GenerateVtableForBase(const CXXRecordDecl *RD, int64_t Offset = 0,
bool MorallyVirtual = false,
bool ForVirtualBase = false,
+ bool ForNPNVBases = false,
+ bool WasPrimaryBase = true,
int CurrentVBaseOffset = 0,
Path_t *Path = 0) {
if (!RD->isDynamicClass())
@@ -626,20 +635,22 @@ public:
extra = 0;
D1(printf("building entries for base %s most derived %s\n",
- RD->getNameAsCString(), Class->getNameAsCString()));
+ RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
if (ForVirtualBase)
extra = VCalls.size();
- VBPrimaries(RD, MorallyVirtual, Offset, !ForVirtualBase, 0, ForVirtualBase,
- CurrentVBaseOffset, true);
+ if (!ForNPNVBases || !WasPrimaryBase) {
+ VBPrimaries(RD, MorallyVirtual, Offset, !ForVirtualBase, 0,
+ ForVirtualBase, CurrentVBaseOffset, true);
- if (Path)
- OverrideMethods(Path, MorallyVirtual, Offset, CurrentVBaseOffset);
+ if (Path)
+ OverrideMethods(Path, MorallyVirtual, Offset, CurrentVBaseOffset);
+ }
- FinishGenerateVtable(RD, Layout, PrimaryBase, PrimaryBaseWasVirtual,
- MorallyVirtual, Offset, ForVirtualBase,
- CurrentVBaseOffset, Path);
+ FinishGenerateVtable(RD, Layout, PrimaryBase, ForNPNVBases, WasPrimaryBase,
+ PrimaryBaseWasVirtual, MorallyVirtual, Offset,
+ ForVirtualBase, CurrentVBaseOffset, Path);
}
void GenerateVtableForVBases(const CXXRecordDecl *RD,
@@ -665,9 +676,9 @@ public:
int64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
int64_t CurrentVBaseOffset = BaseOffset;
D1(printf("vtable %s virtual base %s\n",
- Class->getNameAsCString(), Base->getNameAsCString()));
- GenerateVtableForBase(Base, BaseOffset, true, true, CurrentVBaseOffset,
- Path);
+ MostDerivedClass->getNameAsCString(), Base->getNameAsCString()));
+ GenerateVtableForBase(Base, BaseOffset, true, true, false,
+ true, CurrentVBaseOffset, Path);
}
int64_t BaseOffset;
if (i->isVirtual())
@@ -823,14 +834,14 @@ bool VtableBuilder::OverrideMethod(GlobalDecl GD, bool MorallyVirtual,
VCalls.push_back(0);
D1(printf(" vcall for %s at %d with delta %d most derived %s\n",
MD->getNameAsString().c_str(), (int)-idx-3,
- (int)VCalls[idx-1], Class->getNameAsCString()));
+ (int)VCalls[idx-1], MostDerivedClass->getNameAsCString()));
} else {
NonVirtualOffset[GD] = NonVirtualOffset[OGD];
VCallOffset[GD] = VCallOffset[OGD];
VCalls[idx-1] = -VCallOffset[OGD] + OverrideOffset/8;
D1(printf(" vcall patch for %s at %d with delta %d most derived %s\n",
MD->getNameAsString().c_str(), (int)-idx-3,
- (int)VCalls[idx-1], Class->getNameAsCString()));
+ (int)VCalls[idx-1], MostDerivedClass->getNameAsCString()));
}
int64_t NonVirtualAdjustment = NonVirtualOffset[GD];
int64_t VirtualAdjustment =
@@ -1205,284 +1216,6 @@ CGVtableInfo::GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
return GV;
}
-namespace {
-class VTTBuilder {
- /// Inits - The list of values built for the VTT.
- std::vector<llvm::Constant *> &Inits;
- /// Class - The most derived class that this vtable is being built for.
- const CXXRecordDecl *Class;
- CodeGenModule &CGM; // Per-module state.
- llvm::SmallSet<const CXXRecordDecl *, 32> SeenVBase;
- /// BLayout - Layout for the most derived class that this vtable is being
- /// built for.
- const ASTRecordLayout &BLayout;
- CGVtableInfo::AddrMap_t &AddressPoints;
- // vtbl - A pointer to the vtable for Class.
- llvm::Constant *ClassVtbl;
- llvm::LLVMContext &VMContext;
-
- llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies;
-
- bool GenerateDefinition;
-
- llvm::DenseMap<BaseSubobject, llvm::Constant *> CtorVtables;
- llvm::DenseMap<std::pair<const CXXRecordDecl *, BaseSubobject>, uint64_t>
- CtorVtableAddressPoints;
-
- llvm::Constant *getCtorVtable(const BaseSubobject &Base) {
- if (!GenerateDefinition)
- return 0;
-
- llvm::Constant *&CtorVtable = CtorVtables[Base];
- if (!CtorVtable) {
- // Build the vtable.
- CGVtableInfo::CtorVtableInfo Info
- = CGM.getVtableInfo().getCtorVtable(Class, Base);
-
- CtorVtable = Info.Vtable;
-
- // Add the address points for this base.
- for (CGVtableInfo::AddressPointsMapTy::const_iterator I =
- Info.AddressPoints.begin(), E = Info.AddressPoints.end();
- I != E; ++I) {
- uint64_t &AddressPoint =
- CtorVtableAddressPoints[std::make_pair(Base.getBase(), I->first)];
-
- // Check if we already have the address points for this base.
- if (AddressPoint)
- break;
-
- // Otherwise, insert it.
- AddressPoint = I->second;
- }
- }
-
- return CtorVtable;
- }
-
-
- /// BuildVtablePtr - Build up a referene to the given secondary vtable
- llvm::Constant *BuildVtablePtr(llvm::Constant *Vtable,
- const CXXRecordDecl *VtableClass,
- const CXXRecordDecl *RD,
- uint64_t Offset) {
- int64_t AddressPoint =
- (*AddressPoints[VtableClass])[std::make_pair(RD, Offset)];
-
- // FIXME: We can never have 0 address point. Do this for now so gepping
- // retains the same structure. Later we'll just assert.
- if (AddressPoint == 0)
- AddressPoint = 1;
- D1(printf("XXX address point for %s in %s layout %s at offset %d was %d\n",
- RD->getNameAsCString(), VtblClass->getNameAsCString(),
- Class->getNameAsCString(), (int)Offset, (int)AddressPoint));
-
- llvm::Value *Idxs[] = {
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), 0),
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), AddressPoint)
- };
-
- llvm::Constant *Init =
- llvm::ConstantExpr::getInBoundsGetElementPtr(Vtable, Idxs, 2);
-
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- return llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
- }
-
- /// Secondary - Add the secondary vtable pointers to Inits. Offset is the
- /// current offset in bits to the object we're working on.
- void Secondary(const CXXRecordDecl *RD, llvm::Constant *vtbl,
- const CXXRecordDecl *VtblClass, uint64_t Offset=0,
- bool MorallyVirtual=false) {
- if (RD->getNumVBases() == 0 && ! MorallyVirtual)
- return;
-
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
- bool NonVirtualPrimaryBase;
- NonVirtualPrimaryBase = !PrimaryBaseWasVirtual && Base == PrimaryBase;
- bool BaseMorallyVirtual = MorallyVirtual | i->isVirtual();
- uint64_t BaseOffset;
- if (!i->isVirtual()) {
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- BaseOffset = Offset + Layout.getBaseClassOffset(Base);
- } else
- BaseOffset = BLayout.getVBaseClassOffset(Base);
- llvm::Constant *subvtbl = vtbl;
- const CXXRecordDecl *subVtblClass = VtblClass;
- if ((Base->getNumVBases() || BaseMorallyVirtual)
- && !NonVirtualPrimaryBase) {
- // FIXME: Slightly too many of these for __ZTT8test8_B2
- llvm::Constant *init;
- if (BaseMorallyVirtual)
- init = GenerateDefinition ?
- BuildVtablePtr(vtbl, VtblClass, RD, Offset) : 0;
- else {
- init = GenerateDefinition ?
- getCtorVtable(BaseSubobject(Base, BaseOffset)) : 0;
-
- subvtbl = init;
- subVtblClass = Base;
-
- init = GenerateDefinition ?
- BuildVtablePtr(init, Class, Base, BaseOffset) : 0;
- }
- Inits.push_back(init);
- }
- Secondary(Base, subvtbl, subVtblClass, BaseOffset, BaseMorallyVirtual);
- }
- }
-
- /// BuiltVTT - Add the VTT to Inits. Offset is the offset in bits to the
- /// currnet object we're working on.
- void BuildVTT(const CXXRecordDecl *RD, uint64_t Offset, bool MorallyVirtual) {
- if (RD->getNumVBases() == 0 && !MorallyVirtual)
- return;
-
- llvm::Constant *Vtable;
- const CXXRecordDecl *VtableClass;
-
- // First comes the primary virtual table pointer...
- if (MorallyVirtual) {
- Vtable = GenerateDefinition ? ClassVtbl : 0;
- VtableClass = Class;
- } else {
- Vtable = GenerateDefinition ?
- getCtorVtable(BaseSubobject(RD, Offset)) : 0;
- VtableClass = RD;
- }
-
- llvm::Constant *Init = GenerateDefinition ?
- BuildVtablePtr(Vtable, VtableClass, RD, Offset) : 0;
- Inits.push_back(Init);
-
- // then the secondary VTTs....
- SecondaryVTTs(RD, Offset, MorallyVirtual);
-
- // and last the secondary vtable pointers.
- Secondary(RD, Vtable, VtableClass, Offset, MorallyVirtual);
- }
-
- /// SecondaryVTTs - Add the secondary VTTs to Inits. The secondary VTTs are
- /// built from each direct non-virtual proper base that requires a VTT in
- /// declaration order.
- void SecondaryVTTs(const CXXRecordDecl *RD, uint64_t Offset=0,
- bool MorallyVirtual=false) {
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- if (i->isVirtual())
- continue;
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base);
-
- // Remember the sub-VTT index.
- SubVTTIndicies[Base] = Inits.size();
-
- BuildVTT(Base, BaseOffset, MorallyVirtual);
- }
- }
-
- /// VirtualVTTs - Add the VTT for each proper virtual base in inheritance
- /// graph preorder.
- void VirtualVTTs(const CXXRecordDecl *RD) {
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- if (i->isVirtual() && !SeenVBase.count(Base)) {
- // Remember the sub-VTT index.
- SubVTTIndicies[Base] = Inits.size();
-
- SeenVBase.insert(Base);
- uint64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
- BuildVTT(Base, BaseOffset, true);
- }
- VirtualVTTs(Base);
- }
- }
-
-public:
- VTTBuilder(std::vector<llvm::Constant *> &inits, const CXXRecordDecl *c,
- CodeGenModule &cgm, bool GenerateDefinition)
- : Inits(inits), Class(c), CGM(cgm),
- BLayout(cgm.getContext().getASTRecordLayout(c)),
- AddressPoints(*cgm.getVtableInfo().AddressPoints[c]),
- VMContext(cgm.getModule().getContext()),
- GenerateDefinition(GenerateDefinition) {
-
- // First comes the primary virtual table pointer for the complete class...
- ClassVtbl = CGM.getVtableInfo().getVtable(Class);
- llvm::Constant *Init = GenerateDefinition ?
- BuildVtablePtr(ClassVtbl, Class, Class, 0) : 0;
- Inits.push_back(Init);
-
- // then the secondary VTTs...
- SecondaryVTTs(Class);
-
- // then the secondary vtable pointers...
- Secondary(Class, ClassVtbl, Class);
-
- // and last, the virtual VTTs.
- VirtualVTTs(Class);
- }
-
- llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getSubVTTIndicies() {
- return SubVTTIndicies;
- }
-};
-}
-
-llvm::GlobalVariable *
-CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
- bool GenerateDefinition,
- const CXXRecordDecl *RD) {
- // Only classes that have virtual bases need a VTT.
- if (RD->getNumVBases() == 0)
- return 0;
-
- llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXVTT(RD, OutName);
- llvm::StringRef Name = OutName.str();
-
- D1(printf("vtt %s\n", RD->getNameAsCString()));
-
- llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (GV == 0 || GV->isDeclaration()) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
-
- std::vector<llvm::Constant *> inits;
- VTTBuilder b(inits, RD, CGM, GenerateDefinition);
-
- const llvm::ArrayType *Type = llvm::ArrayType::get(Int8PtrTy, inits.size());
- llvm::Constant *Init = 0;
- if (GenerateDefinition)
- Init = llvm::ConstantArray::get(Type, inits);
-
- llvm::GlobalVariable *OldGV = GV;
- GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
- Linkage, Init, Name);
- CGM.setGlobalVisibility(GV, RD);
-
- if (OldGV) {
- GV->takeName(OldGV);
- llvm::Constant *NewPtr =
- llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
- OldGV->replaceAllUsesWith(NewPtr);
- OldGV->eraseFromParent();
- }
- }
-
- return GV;
-}
-
void CGVtableInfo::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
llvm::GlobalVariable *&Vtable = Vtables[RD];
@@ -1510,25 +1243,6 @@ llvm::GlobalVariable *CGVtableInfo::getVtable(const CXXRecordDecl *RD) {
return Vtable;
}
-CGVtableInfo::CtorVtableInfo
-CGVtableInfo::getCtorVtable(const CXXRecordDecl *RD,
- const BaseSubobject &Base) {
- CtorVtableInfo Info;
-
- Info.Vtable = GenerateVtable(llvm::GlobalValue::InternalLinkage,
- /*GenerateDefinition=*/true,
- RD, Base.getBase(), Base.getBaseOffset(),
- Info.AddressPoints);
- return Info;
-}
-
-llvm::GlobalVariable *CGVtableInfo::getVTT(const CXXRecordDecl *RD) {
- return GenerateVTT(llvm::GlobalValue::ExternalLinkage,
- /*GenerateDefinition=*/false, RD);
-
-}
-
-
void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const CXXRecordDecl *RD = MD->getParent();
@@ -1562,47 +1276,3 @@ void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) {
}
}
-bool CGVtableInfo::needsVTTParameter(GlobalDecl GD) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- // We don't have any virtual bases, just return early.
- if (!MD->getParent()->getNumVBases())
- return false;
-
- // Check if we have a base constructor.
- if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
- return true;
-
- // Check if we have a base destructor.
- if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
- return true;
-
- return false;
-}
-
-uint64_t CGVtableInfo::getSubVTTIndex(const CXXRecordDecl *RD,
- const CXXRecordDecl *Base) {
- ClassPairTy ClassPair(RD, Base);
-
- SubVTTIndiciesTy::iterator I =
- SubVTTIndicies.find(ClassPair);
- if (I != SubVTTIndicies.end())
- return I->second;
-
- std::vector<llvm::Constant *> inits;
- VTTBuilder Builder(inits, RD, CGM, /*GenerateDefinition=*/false);
-
- for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
- Builder.getSubVTTIndicies().begin(),
- E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
- // Insert all indices.
- ClassPairTy ClassPair(RD, I->first);
-
- SubVTTIndicies.insert(std::make_pair(ClassPair, I->second));
- }
-
- I = SubVTTIndicies.find(ClassPair);
- assert(I != SubVTTIndicies.end() && "Did not find index!");
-
- return I->second;
-}
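
Annotation: beyond moving the VTT machinery into CGVTT.cpp, the CGVtable.cpp hunks flip the D1 debug macro from compiled-out to environment-gated (CGVTT.cpp keeps its own compiled-out copy above). The pattern, as a standalone sketch:

// Sketch: env-gated debug tracing. With the active definition, the
// payload runs only when DEBUG is set in the environment; with
// "#define D1(x)" it disappears entirely at compile time.
#include <cstdio>
#include <cstdlib>
#define D1(X) do { if (getenv("DEBUG")) { X; } } while (0)

void trace_example(int offset) {
  D1(printf("layout at offset %d\n", offset));
}
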
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 45469d3..e72a1d9 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -24,6 +24,7 @@ add_clang_library(clangCodeGen
CGStmt.cpp
CGTemporaries.cpp
CGVtable.cpp
+ CGVTT.cpp
CodeGenFunction.cpp
CodeGenModule.cpp
CodeGenTypes.cpp
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 5ecc30e..cf504a7 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -67,6 +67,15 @@ CodeGenModule::~CodeGenModule() {
delete DebugInfo;
}
+void CodeGenModule::createObjCRuntime() {
+ if (!Features.NeXTRuntime)
+ Runtime = CreateGNUObjCRuntime(*this);
+ else if (Features.ObjCNonFragileABI)
+ Runtime = CreateMacNonFragileABIObjCRuntime(*this);
+ else
+ Runtime = CreateMacObjCRuntime(*this);
+}
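+
+// Explanatory note (added, not part of the patch): the dispatch above follows
+// the language options set by the driver; when NeXTRuntime is off (e.g. via
+// -fgnu-runtime) the GNU runtime is used, and NeXT builds pick the
+// non-fragile ABI variant when ObjCNonFragileABI is set.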
+
void CodeGenModule::Release() {
EmitDeferred();
EmitCXXGlobalInitFunc();
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index c7aa7a4..81f3979 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -161,6 +161,9 @@ class CodeGenModule : public BlockModule {
/// strings. This value has type int * but is actually an Obj-C class pointer.
llvm::Constant *CFConstantStringClassRef;
+  /// Lazily create the Objective-C runtime.
+ void createObjCRuntime();
+
llvm::LLVMContext &VMContext;
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
@@ -174,7 +177,7 @@ public:
/// getObjCRuntime() - Return a reference to the configured
/// Objective-C runtime.
CGObjCRuntime &getObjCRuntime() {
- assert(Runtime && "No Objective-C runtime has been configured.");
+ if (!Runtime) createObjCRuntime();
return *Runtime;
}
diff --git a/lib/CodeGen/TargetABIInfo.cpp b/lib/CodeGen/TargetABIInfo.cpp
deleted file mode 100644
index 863a297..0000000
--- a/lib/CodeGen/TargetABIInfo.cpp
+++ /dev/null
@@ -1,1821 +0,0 @@
-//===---- TargetABIInfo.cpp - Encapsulate target ABI details ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// These classes wrap the information about a call or function
-// definition used to handle ABI compliance.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ABIInfo.h"
-#include "CodeGenFunction.h"
-#include "clang/AST/RecordLayout.h"
-#include "llvm/Type.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace clang;
-using namespace CodeGen;
-
-ABIInfo::~ABIInfo() {}
-
-void ABIArgInfo::dump() const {
- llvm::raw_ostream &OS = llvm::errs();
- OS << "(ABIArgInfo Kind=";
- switch (TheKind) {
- case Direct:
- OS << "Direct";
- break;
- case Extend:
- OS << "Extend";
- break;
- case Ignore:
- OS << "Ignore";
- break;
- case Coerce:
- OS << "Coerce Type=";
- getCoerceToType()->print(OS);
- break;
- case Indirect:
- OS << "Indirect Align=" << getIndirectAlign();
- break;
- case Expand:
- OS << "Expand";
- break;
- }
- OS << ")\n";
-}
-
-static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
-
-/// isEmptyField - Return true iff the field is "empty", that is, it
-/// is an unnamed bit-field or an (array of) empty record(s).
-static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
- bool AllowArrays) {
- if (FD->isUnnamedBitfield())
- return true;
-
- QualType FT = FD->getType();
-
- // Constant arrays of empty records count as empty, strip them off.
- if (AllowArrays)
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
- FT = AT->getElementType();
-
- return isEmptyRecord(Context, FT, AllowArrays);
-}
-
-/// isEmptyRecord - Return true iff a structure contains only empty
-/// fields. Note that a structure with a flexible array member is not
-/// considered empty.
-static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
-    return false;
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i)
- if (!isEmptyField(Context, *i, AllowArrays))
- return false;
- return true;
-}
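-
-// Worked example (illustrative only): given
-//   struct E { int : 0; };
-//   struct F { struct E e[8]; };
-// both E and F count as empty when AllowArrays is true, since unnamed
-// bit-fields are skipped and constant arrays are stripped to their element.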
-
-/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
-/// a non-trivial destructor or a non-trivial copy constructor.
-static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
- return false;
-
- return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
-}
-
-/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
-/// a record type with either a non-trivial destructor or a non-trivial copy
-/// constructor.
-static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return false;
-
- return hasNonTrivialDestructorOrCopyConstructor(RT);
-}
-
-/// isSingleElementStruct - Determine if a structure is a "single
-/// element struct", i.e. it has exactly one non-empty field or
-/// exactly one field which is itself a single element
-/// struct. Structures with flexible array members are never
-/// considered single element structs.
-///
-/// \return The type of the single non-empty field, if it exists.
-static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
- const RecordType *RT = T->getAsStructureType();
- if (!RT)
- return 0;
-
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return 0;
-
- const Type *Found = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i) {
- const FieldDecl *FD = *i;
- QualType FT = FD->getType();
-
- // Ignore empty fields.
- if (isEmptyField(Context, FD, true))
- continue;
-
- // If we already found an element then this isn't a single-element
- // struct.
- if (Found)
- return 0;
-
- // Treat single element arrays as the element.
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() != 1)
- break;
- FT = AT->getElementType();
- }
-
- if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
- Found = FT.getTypePtr();
- } else {
- Found = isSingleElementStruct(FT, Context);
- if (!Found)
- return 0;
- }
- }
-
- return Found;
-}
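-
-// Worked example (illustrative only): for
-//   struct S { struct { double d[1]; } inner; int : 0; };
-// the unnamed bit-field is ignored as empty and the one-element array is
-// unwrapped, so the function returns the Type for 'double'.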
-
-static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
- if (!Ty->getAs<BuiltinType>() && !Ty->isAnyPointerType() &&
- !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
- !Ty->isBlockPointerType())
- return false;
-
- uint64_t Size = Context.getTypeSize(Ty);
- return Size == 32 || Size == 64;
-}
-
-/// canExpandIndirectArgument - Test whether an argument type which is to be
-/// passed indirectly (on the stack) would have the equivalent layout if it were
-/// expanded into separate arguments. If so, we prefer to do the latter to avoid
-/// inhibiting optimizations.
-///
-// FIXME: This predicate is missing many cases, currently it just follows
-// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
-// should probably make this smarter, or better yet make the LLVM backend
-// capable of handling it.
-static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
- // We can only expand structure types.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT)
- return false;
-
- // We can only expand (C) structures.
- //
- // FIXME: This needs to be generalized to handle classes as well.
- const RecordDecl *RD = RT->getDecl();
- if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
- return false;
-
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i) {
- const FieldDecl *FD = *i;
-
- if (!is32Or64BitBasicType(FD->getType(), Context))
- return false;
-
- // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
- // how to expand them yet, and the predicate for telling if a bitfield still
- // counts as "basic" is more complicated than what we were doing previously.
- if (FD->isBitField())
- return false;
- }
-
- return true;
-}
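-
-// Example (added for illustration): struct { int a; long long b; } satisfies
-// the predicate, since both fields are 32- or 64-bit basic types and neither
-// is a bit-field, so it may be expanded into two scalar arguments; a struct
-// containing a 'short' fails the check and stays indirect.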
-
-static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i) {
- const FieldDecl *FD = *i;
-
- if (FD->getType()->isVectorType() &&
- Context.getTypeSize(FD->getType()) >= 128)
- return true;
-
- if (const RecordType* RT = FD->getType()->getAs<RecordType>())
- if (typeContainsSSEVector(RT->getDecl(), Context))
- return true;
- }
-
- return false;
-}
-
-namespace {
-/// DefaultABIInfo - The default implementation for ABI specific
-/// details. This implementation provides information which results in
-/// self-consistent and sensible LLVM IR generation, but does not
-/// conform to any particular ABI.
-class DefaultABIInfo : public ABIInfo {
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
- VMContext);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context, VMContext);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-/// X86_32ABIInfo - The X86-32 ABI information.
-class X86_32ABIInfo : public ABIInfo {
- ASTContext &Context;
- bool IsDarwinVectorABI;
- bool IsSmallStructInRegABI;
-
- static bool isRegisterSize(unsigned Size) {
- return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
- }
-
- static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
-
- static unsigned getIndirectArgumentAlignment(QualType Ty,
- ASTContext &Context);
-
-public:
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
- VMContext);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context, VMContext);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-
- X86_32ABIInfo(ASTContext &Context, bool d, bool p)
- : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
- IsSmallStructInRegABI(p) {}
-};
-}
-
-
-/// shouldReturnTypeInRegister - Determine if the given type should be
-/// returned in a register (for the Darwin ABI).
-bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
- ASTContext &Context) {
- uint64_t Size = Context.getTypeSize(Ty);
-
- // Type must be register sized.
- if (!isRegisterSize(Size))
- return false;
-
- if (Ty->isVectorType()) {
-    // 64- and 128-bit vectors inside structures are not returned in
- // registers.
- if (Size == 64 || Size == 128)
- return false;
-
- return true;
- }
-
- // If this is a builtin, pointer, enum, or complex type, it is ok.
- if (Ty->getAs<BuiltinType>() || Ty->isAnyPointerType() ||
- Ty->isAnyComplexType() || Ty->isEnumeralType() ||
- Ty->isBlockPointerType())
- return true;
-
- // Arrays are treated like records.
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
- return shouldReturnTypeInRegister(AT->getElementType(), Context);
-
- // Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT) return false;
-
-  // Structure types are returned in a register if all of their fields
-  // would be returned in a register.
- for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
- e = RT->getDecl()->field_end(); i != e; ++i) {
- const FieldDecl *FD = *i;
-
- // Empty fields are ignored.
- if (isEmptyField(Context, FD, true))
- continue;
-
- // Check fields recursively.
- if (!shouldReturnTypeInRegister(FD->getType(), Context))
- return false;
- }
-
- return true;
-}
-
-ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else if (const VectorType *VT = RetTy->getAs<VectorType>()) {
- // On Darwin, some vectors are returned in registers.
- if (IsDarwinVectorABI) {
- uint64_t Size = Context.getTypeSize(RetTy);
-
- // 128-bit vectors are a special case; they are returned in
- // registers and we need to make sure to pick a type the LLVM
- // backend will like.
- if (Size == 128)
- return ABIArgInfo::getCoerce(llvm::VectorType::get(
- llvm::Type::getInt64Ty(VMContext), 2));
-
- // Always return in register if it fits in a general purpose
- // register, or if it is 64 bits and has a single element.
- if ((Size == 8 || Size == 16 || Size == 32) ||
- (Size == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
-
- return ABIArgInfo::getIndirect(0);
- }
-
- return ABIArgInfo::getDirect();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- if (const RecordType *RT = RetTy->getAsStructureType()) {
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always indirect.
- if (hasNonTrivialDestructorOrCopyConstructor(RT))
- return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
-
- // Structures with flexible arrays are always indirect.
- if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(0);
- }
-
- // If specified, structs and unions are always indirect.
- if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
- return ABIArgInfo::getIndirect(0);
-
- // Classify "single element" structs as their element type.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
- if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
- if (BT->isIntegerType()) {
- // We need to use the size of the structure, padding
- // bit-fields can adjust that to be larger than the single
- // element type.
- uint64_t Size = Context.getTypeSize(RetTy);
- return ABIArgInfo::getCoerce(
- llvm::IntegerType::get(VMContext, (unsigned) Size));
- } else if (BT->getKind() == BuiltinType::Float) {
- assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
- "Unexpect single element structure size!");
- return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
- } else if (BT->getKind() == BuiltinType::Double) {
- assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
- "Unexpect single element structure size!");
- return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
- }
- } else if (SeltTy->isPointerType()) {
- // FIXME: It would be really nice if this could come out as the proper
- // pointer type.
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- return ABIArgInfo::getCoerce(PtrTy);
- } else if (SeltTy->isVectorType()) {
- // 64- and 128-bit vectors are never returned in a
- // register when inside a structure.
- uint64_t Size = Context.getTypeSize(RetTy);
- if (Size == 64 || Size == 128)
- return ABIArgInfo::getIndirect(0);
-
- return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
- }
- }
-
- // Small structures which are register sized are generally returned
- // in a register.
- if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
- uint64_t Size = Context.getTypeSize(RetTy);
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
- }
-
- return ABIArgInfo::getIndirect(0);
- } else {
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
-}
-
-unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
- ASTContext &Context) {
- unsigned Align = Context.getTypeAlign(Ty);
- if (Align < 128) return 0;
- if (const RecordType* RT = Ty->getAs<RecordType>())
- if (typeContainsSSEVector(RT->getDecl(), Context))
- return 16;
- return 0;
-}
-
-ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- // FIXME: Set alignment on indirect arguments.
- if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
- // Structures with flexible arrays are always indirect.
- if (const RecordType *RT = Ty->getAsStructureType())
- if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
- Context));
-
- // Ignore empty structs.
- if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
- return ABIArgInfo::getIgnore();
-
- // Expand small (<= 128-bit) record types when we know that the stack layout
- // of those arguments will match the struct. This is important because the
- // LLVM backend isn't smart enough to remove byval, which inhibits many
- // optimizations.
- if (Context.getTypeSize(Ty) <= 4*32 &&
- canExpandIndirectArgument(Ty, Context))
- return ABIArgInfo::getExpand();
-
- return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
- } else {
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
-}
-
-llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
-}
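-
-// Note (added): every i386 va_arg slot is rounded up to a multiple of 4
-// bytes, which is what RoundUpToAlignment(Size / 8, 4) computes above; e.g. a
-// 6-byte struct advances 'ap' by 8, while a plain int advances it by 4.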
-
-namespace {
-/// X86_64ABIInfo - The X86_64 ABI information.
-class X86_64ABIInfo : public ABIInfo {
- enum Class {
- Integer = 0,
- SSE,
- SSEUp,
- X87,
- X87Up,
- ComplexX87,
- NoClass,
- Memory
- };
-
- /// merge - Implement the X86_64 ABI merging algorithm.
- ///
- /// Merge an accumulating classification \arg Accum with a field
- /// classification \arg Field.
- ///
- /// \param Accum - The accumulating classification. This should
- /// always be either NoClass or the result of a previous merge
- /// call. In addition, this should never be Memory (the caller
- /// should just return Memory for the aggregate).
- Class merge(Class Accum, Class Field) const;
-
- /// classify - Determine the x86_64 register classes in which the
- /// given type T should be passed.
- ///
- /// \param Lo - The classification for the parts of the type
- /// residing in the low word of the containing object.
- ///
- /// \param Hi - The classification for the parts of the type
- /// residing in the high word of the containing object.
- ///
- /// \param OffsetBase - The bit offset of this type in the
-  /// containing object. Some parameters are classified differently
-  /// depending on whether they straddle an eightbyte boundary.
- ///
- /// If a word is unused its result will be NoClass; if a type should
- /// be passed in Memory then at least the classification of \arg Lo
- /// will be Memory.
- ///
- /// The \arg Lo class will be NoClass iff the argument is ignored.
- ///
- /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
- /// also be ComplexX87.
- void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
- Class &Lo, Class &Hi) const;
-
- /// getCoerceResult - Given a source type \arg Ty and an LLVM type
-  /// to coerce to, choose the best way to pass Ty in the same place
- /// that \arg CoerceTo would be passed, but while keeping the
- /// emitted code as simple as possible.
- ///
- /// FIXME: Note, this should be cleaned up to just take an enumeration of all
- /// the ways we might want to pass things, instead of constructing an LLVM
- /// type. This makes this code more explicit, and it makes it clearer that we
- /// are also doing this for correctness in the case of passing scalar types.
- ABIArgInfo getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo,
- ASTContext &Context) const;
-
-  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
- /// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty,
- ASTContext &Context) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext,
- unsigned &neededInt,
- unsigned &neededSSE) const;
-
-public:
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-}
-
-X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
- Class Field) const {
- // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
- // classified recursively so that always two fields are
- // considered. The resulting class is calculated according to
- // the classes of the fields in the eightbyte:
- //
- // (a) If both classes are equal, this is the resulting class.
- //
- // (b) If one of the classes is NO_CLASS, the resulting class is
- // the other class.
- //
- // (c) If one of the classes is MEMORY, the result is the MEMORY
- // class.
- //
- // (d) If one of the classes is INTEGER, the result is the
- // INTEGER.
- //
- // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
- // MEMORY is used as class.
- //
- // (f) Otherwise class SSE is used.
-
- // Accum should never be memory (we should have returned) or
- // ComplexX87 (because this cannot be passed in a structure).
- assert((Accum != Memory && Accum != ComplexX87) &&
- "Invalid accumulated classification during merge.");
- if (Accum == Field || Field == NoClass)
- return Accum;
- else if (Field == Memory)
- return Memory;
- else if (Accum == NoClass)
- return Field;
- else if (Accum == Integer || Field == Integer)
- return Integer;
- else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
- Accum == X87 || Accum == X87Up)
- return Memory;
- else
- return SSE;
-}
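-
-// Worked example (illustrative): classifying
-//   union U { double d; int i; };
-// merges SSE (from 'd') with Integer (from 'i'); rule (d) makes the result
-// INTEGER, so U travels in a general purpose register.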
-
-void X86_64ABIInfo::classify(QualType Ty,
- ASTContext &Context,
- uint64_t OffsetBase,
- Class &Lo, Class &Hi) const {
- // FIXME: This code can be simplified by introducing a simple value class for
- // Class pairs with appropriate constructor methods for the various
- // situations.
-
- // FIXME: Some of the split computations are wrong; unaligned vectors
- // shouldn't be passed in registers for example, so there is no chance they
- // can straddle an eightbyte. Verify & simplify.
-
- Lo = Hi = NoClass;
-
- Class &Current = OffsetBase < 64 ? Lo : Hi;
- Current = Memory;
-
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- BuiltinType::Kind k = BT->getKind();
-
- if (k == BuiltinType::Void) {
- Current = NoClass;
- } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
- Lo = Integer;
- Hi = Integer;
- } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
- Current = Integer;
- } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
- Current = SSE;
- } else if (k == BuiltinType::LongDouble) {
- Lo = X87;
- Hi = X87Up;
- }
- // FIXME: _Decimal32 and _Decimal64 are SSE.
- // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
- } else if (const EnumType *ET = Ty->getAs<EnumType>()) {
- // Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
- } else if (Ty->hasPointerRepresentation()) {
- Current = Integer;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- uint64_t Size = Context.getTypeSize(VT);
- if (Size == 32) {
- // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
- // float> as integer.
- Current = Integer;
-
- // If this type crosses an eightbyte boundary, it should be
- // split.
- uint64_t EB_Real = (OffsetBase) / 64;
- uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
- if (EB_Real != EB_Imag)
- Hi = Lo;
- } else if (Size == 64) {
- // gcc passes <1 x double> in memory. :(
- if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
- return;
-
- // gcc passes <1 x long long> as INTEGER.
- if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
- Current = Integer;
- else
- Current = SSE;
-
- // If this type crosses an eightbyte boundary, it should be
- // split.
- if (OffsetBase && OffsetBase != 64)
- Hi = Lo;
- } else if (Size == 128) {
- Lo = SSE;
- Hi = SSEUp;
- }
- } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- QualType ET = Context.getCanonicalType(CT->getElementType());
-
- uint64_t Size = Context.getTypeSize(Ty);
- if (ET->isIntegralType()) {
- if (Size <= 64)
- Current = Integer;
- else if (Size <= 128)
- Lo = Hi = Integer;
- } else if (ET == Context.FloatTy)
- Current = SSE;
- else if (ET == Context.DoubleTy)
- Lo = Hi = SSE;
- else if (ET == Context.LongDoubleTy)
- Current = ComplexX87;
-
- // If this complex type crosses an eightbyte boundary then it
- // should be split.
- uint64_t EB_Real = (OffsetBase) / 64;
- uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
- if (Hi == NoClass && EB_Real != EB_Imag)
- Hi = Lo;
- } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
- // Arrays are treated like structures.
-
- uint64_t Size = Context.getTypeSize(Ty);
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
- return;
-
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
- //
- // Only need to check alignment of array base.
- if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
- return;
-
- // Otherwise implement simplified merge. We could be smarter about
- // this, but it isn't worth it and would be harder to verify.
- Current = NoClass;
- uint64_t EltSize = Context.getTypeSize(AT->getElementType());
- uint64_t ArraySize = AT->getSize().getZExtValue();
- for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
- Class FieldLo, FieldHi;
- classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
-    // Do post-merger cleanup (see below). The only case we worry about is Memory.
- if (Hi == Memory)
- Lo = Memory;
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- uint64_t Size = Context.getTypeSize(Ty);
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
- return;
-
- // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
- // copy constructor or a non-trivial destructor, it is passed by invisible
- // reference.
- if (hasNonTrivialDestructorOrCopyConstructor(RT))
- return;
-
- const RecordDecl *RD = RT->getDecl();
-
- // Assume variable sized types are passed in memory.
- if (RD->hasFlexibleArrayMember())
- return;
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // Reset Lo class, this will be recomputed.
- Current = NoClass;
-
- // If this is a C++ record, classify the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
- e = CXXRD->bases_end(); i != e; ++i) {
- assert(!i->isVirtual() && !i->getType()->isDependentType() &&
- "Unexpected base class!");
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
-
- // Classify this field.
- //
- // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
- // single eightbyte, each is classified separately. Each eightbyte gets
- // initialized to class NO_CLASS.
- Class FieldLo, FieldHi;
- uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
- classify(i->getType(), Context, Offset, FieldLo, FieldHi);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
- // If this record has no fields but isn't empty, classify as INTEGER.
- if (RD->field_empty() && Size)
- Current = Integer;
- }
-
- // Classify the fields one at a time, merging the results.
- unsigned idx = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- bool BitField = i->isBitField();
-
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
- //
- // Note, skip this test for bit-fields, see below.
- if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
- Lo = Memory;
- return;
- }
-
- // Classify this field.
- //
- // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
- // exceeds a single eightbyte, each is classified
- // separately. Each eightbyte gets initialized to class
- // NO_CLASS.
- Class FieldLo, FieldHi;
-
-      // Bit-fields require special handling: they do not force the
- // structure to be passed in memory even if unaligned, and
- // therefore they can straddle an eightbyte.
- if (BitField) {
- // Ignore padding bit-fields.
- if (i->isUnnamedBitfield())
- continue;
-
- uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
-
- uint64_t EB_Lo = Offset / 64;
- uint64_t EB_Hi = (Offset + Size - 1) / 64;
- FieldLo = FieldHi = NoClass;
- if (EB_Lo) {
- assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
- FieldLo = NoClass;
- FieldHi = Integer;
- } else {
- FieldLo = Integer;
- FieldHi = EB_Hi ? Integer : NoClass;
- }
- } else
- classify(i->getType(), Context, Offset, FieldLo, FieldHi);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
- // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
- //
- // (a) If one of the classes is MEMORY, the whole argument is
- // passed in memory.
- //
-    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
-
- // The first of these conditions is guaranteed by how we implement
- // the merge (just bail).
- //
- // The second condition occurs in the case of unions; for example
- // union { _Complex double; unsigned; }.
- if (Hi == Memory)
- Lo = Memory;
- if (Hi == SSEUp && Lo != SSE)
- Hi = SSE;
- }
-}
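-
-// Worked example (not from the original source): for
-//   struct S { int a; int b; double c; };
-// the first eightbyte ('a' and 'b') classifies as INTEGER and the second
-// ('c') as SSE, so S is passed in one GPR and one XMM register
-// (Lo = Integer, Hi = SSE).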
-
-ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo,
- ASTContext &Context) const {
- if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
- // Integer and pointer types will end up in a general purpose
- // register.
- if (Ty->isIntegralType() || Ty->hasPointerRepresentation())
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
- // FIXME: It would probably be better to make CGFunctionInfo only map using
-    // canonical types than to canonicalize here.
- QualType CTy = Context.getCanonicalType(Ty);
-
- // Float and double end up in a single SSE reg.
- if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
- return ABIArgInfo::getDirect();
-
- }
-
- return ABIArgInfo::getCoerce(CoerceTo);
-}
-
-ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
- ASTContext &Context) const {
- // If this is a scalar LLVM value then assume LLVM will pass it in the right
- // place naturally.
- if (!CodeGenFunction::hasAggregateLLVMType(Ty))
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-
- bool ByVal = !isRecordWithNonTrivialDestructorOrCopyConstructor(Ty);
-
- // FIXME: Set alignment correctly.
- return ABIArgInfo::getIndirect(0, ByVal);
-}
-
-ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
- // classification algorithm.
- X86_64ABIInfo::Class Lo, Hi;
- classify(RetTy, Context, 0, Lo, Hi);
-
- // Check some invariants.
- assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
-
- const llvm::Type *ResType = 0;
- switch (Lo) {
- case NoClass:
- return ABIArgInfo::getIgnore();
-
- case SSEUp:
- case X87Up:
- assert(0 && "Invalid classification for lo word.");
-
- // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
- // hidden argument.
- case Memory:
- return getIndirectResult(RetTy, Context);
-
- // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
- // available register of the sequence %rax, %rdx is used.
- case Integer:
- ResType = llvm::Type::getInt64Ty(VMContext); break;
-
- // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
- // available SSE register of the sequence %xmm0, %xmm1 is used.
- case SSE:
- ResType = llvm::Type::getDoubleTy(VMContext); break;
-
- // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
- // returned on the X87 stack in %st0 as 80-bit x87 number.
- case X87:
- ResType = llvm::Type::getX86_FP80Ty(VMContext); break;
-
- // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
- // part of the value is returned in %st0 and the imaginary part in
- // %st1.
- case ComplexX87:
- assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext),
- llvm::Type::getX86_FP80Ty(VMContext),
- NULL);
- break;
- }
-
- switch (Hi) {
- // Memory was handled previously and X87 should
- // never occur as a hi class.
- case Memory:
- case X87:
- assert(0 && "Invalid classification for hi word.");
-
- case ComplexX87: // Previously handled.
- case NoClass: break;
-
- case Integer:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getInt64Ty(VMContext), NULL);
- break;
- case SSE:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getDoubleTy(VMContext), NULL);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
- // is passed in the upper half of the last used SSE register.
- //
-  // SSEUP should always be preceded by SSE, just widen.
- case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
- // returned together with the previous X87 value in %st0.
- case X87Up:
-    // If X87Up is preceded by X87, we don't need to do
-    // anything. However, in some cases with unions it may not be
-    // preceded by X87. In such situations we follow gcc and pass the
- // extra bits in an SSE reg.
- if (Lo != X87)
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getDoubleTy(VMContext), NULL);
- break;
- }
-
- return getCoerceResult(RetTy, ResType, Context);
-}
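-
-// For instance (added note): _Complex double classifies as Lo = Hi = SSE and
-// is coerced to { double, double }, coming back in %xmm0/%xmm1, while long
-// double classifies as X87/X87Up and is returned on the x87 stack in %st0.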
-
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- unsigned &neededInt,
- unsigned &neededSSE) const {
- X86_64ABIInfo::Class Lo, Hi;
- classify(Ty, Context, 0, Lo, Hi);
-
- // Check some invariants.
- // FIXME: Enforce these by construction.
- assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
-
- neededInt = 0;
- neededSSE = 0;
- const llvm::Type *ResType = 0;
- switch (Lo) {
- case NoClass:
- return ABIArgInfo::getIgnore();
-
- // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
- // on the stack.
- case Memory:
-
- // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
- // COMPLEX_X87, it is passed in memory.
- case X87:
- case ComplexX87:
- return getIndirectResult(Ty, Context);
-
- case SSEUp:
- case X87Up:
- assert(0 && "Invalid classification for lo word.");
-
- // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
- // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
- // and %r9 is used.
- case Integer:
- ++neededInt;
- ResType = llvm::Type::getInt64Ty(VMContext);
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
- // available SSE register is used, the registers are taken in the
- // order from %xmm0 to %xmm7.
- case SSE:
- ++neededSSE;
- ResType = llvm::Type::getDoubleTy(VMContext);
- break;
- }
-
- switch (Hi) {
- // Memory was handled previously, ComplexX87 and X87 should
-  // never occur as hi classes, and X87Up must be preceded by X87,
- // which is passed in memory.
- case Memory:
- case X87:
- case ComplexX87:
- assert(0 && "Invalid classification for hi word.");
- break;
-
- case NoClass: break;
- case Integer:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getInt64Ty(VMContext), NULL);
- ++neededInt;
- break;
-
- // X87Up generally doesn't occur here (long double is passed in
- // memory), except in situations involving unions.
- case X87Up:
- case SSE:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getDoubleTy(VMContext), NULL);
- ++neededSSE;
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
- // eightbyte is passed in the upper half of the last used SSE
- // register.
- case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
- break;
- }
-
- return getCoerceResult(Ty, ResType, Context);
-}
-
-void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
- Context, VMContext);
-
- // Keep track of the number of assigned registers.
- unsigned freeIntRegs = 6, freeSSERegs = 8;
-
- // If the return value is indirect, then the hidden argument is consuming one
- // integer register.
- if (FI.getReturnInfo().isIndirect())
- --freeIntRegs;
-
- // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
- // get assigned (in left-to-right order) for passing as follows...
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, Context, VMContext,
- neededInt, neededSSE);
-
- // AMD64-ABI 3.2.3p3: If there are no registers available for any
- // eightbyte of an argument, the whole argument is passed on the
- // stack. If registers have already been assigned for some
- // eightbytes of such an argument, the assignments get reverted.
- if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
- freeIntRegs -= neededInt;
- freeSSERegs -= neededSSE;
- } else {
- it->info = getIndirectResult(it->type, Context);
- }
- }
-}
-
-static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) {
- llvm::Value *overflow_arg_area_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
- llvm::Value *overflow_arg_area =
- CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
-
- // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
- // byte boundary if alignment needed by type exceeds 8 byte boundary.
- uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
- if (Align > 8) {
- // Note that we follow the ABI & gcc here, even though the type
- // could in theory have an alignment greater than 16. This case
- // shouldn't ever matter in practice.
-
- // overflow_arg_area = (overflow_arg_area + 15) & ~15;
- llvm::Value *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
- overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
- llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
- llvm::Type::getInt64Ty(CGF.getLLVMContext()));
- llvm::Value *Mask = llvm::ConstantInt::get(
- llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
- overflow_arg_area =
- CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
- overflow_arg_area->getType(),
- "overflow_arg_area.align");
- }
-
- // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
- const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *Res =
- CGF.Builder.CreateBitCast(overflow_arg_area,
- llvm::PointerType::getUnqual(LTy));
-
- // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
- // l->overflow_arg_area + sizeof(type).
- // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
- // an 8 byte boundary.
-
- uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
- llvm::Value *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
- (SizeInBytes + 7) & ~7);
- overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
- "overflow_arg_area.next");
- CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
-
- // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
- return Res;
-}
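-
-// Note (added for clarity): for a 12-byte type with 16-byte alignment the
-// code above first rounds overflow_arg_area up via (p + 15) & ~15, then
-// advances it by (12 + 7) & ~7 = 16 bytes, keeping the pointer 8-byte
-// aligned for the next argument.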
-
-llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- llvm::LLVMContext &VMContext = CGF.getLLVMContext();
- const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
- const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
-
- // Assume that va_list type is correct; should be pointer to LLVM type:
- // struct {
- // i32 gp_offset;
- // i32 fp_offset;
- // i8* overflow_arg_area;
- // i8* reg_save_area;
- // };
- unsigned neededInt, neededSSE;
- ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
- neededInt, neededSSE);
-
- // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
-  // in the registers. If not, go to step 7.
- if (!neededInt && !neededSSE)
- return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
-
- // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
- // general purpose registers needed to pass type and num_fp to hold
- // the number of floating point registers needed.
-
- // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
- // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
- // l->fp_offset > 304 - num_fp * 16 go to step 7.
- //
-  // NOTE: 304 is a typo; there are only (6 * 8 + 8 * 16) = 176 bytes of
-  // register save space.
-
- llvm::Value *InRegs = 0;
- llvm::Value *gp_offset_p = 0, *gp_offset = 0;
- llvm::Value *fp_offset_p = 0, *fp_offset = 0;
- if (neededInt) {
- gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
- gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
- InRegs =
- CGF.Builder.CreateICmpULE(gp_offset,
- llvm::ConstantInt::get(i32Ty,
- 48 - neededInt * 8),
- "fits_in_gp");
- }
-
- if (neededSSE) {
- fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
- fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
- llvm::Value *FitsInFP =
- CGF.Builder.CreateICmpULE(fp_offset,
- llvm::ConstantInt::get(i32Ty,
- 176 - neededSSE * 16),
- "fits_in_fp");
- InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
- }
-
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
-
- // Emit code to load the value if it was passed in registers.
-
- CGF.EmitBlock(InRegBlock);
-
- // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
- // an offset of l->gp_offset and/or l->fp_offset. This may require
- // copying to a temporary location in case the parameter is passed
- // in different register classes or requires an alignment greater
- // than 8 for general purpose registers and 16 for XMM registers.
- //
- // FIXME: This really results in shameful code when we end up needing to
- // collect arguments from different places; often what should result in a
- // simple assembling of a structure from scattered addresses has many more
- // loads than necessary. Can we clean this up?
- const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *RegAddr =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
- "reg_save_area");
- if (neededInt && neededSSE) {
- // FIXME: Cleanup.
- assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
- const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
- llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
- assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
- const llvm::Type *TyLo = ST->getElementType(0);
- const llvm::Type *TyHi = ST->getElementType(1);
- assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
- "Unexpected ABI info for mixed regs");
- const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
- const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
- llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
- llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
- llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
- llvm::Value *V =
- CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
-
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
- } else if (neededInt) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
- } else {
- if (neededSSE == 1) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
- } else {
- assert(neededSSE == 2 && "Invalid number of needed registers!");
- // SSE registers are spaced 16 bytes apart in the register save
-      // area, so we need to collect the two eightbytes together.
- llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- llvm::Value *RegAddrHi =
- CGF.Builder.CreateGEP(RegAddrLo,
- llvm::ConstantInt::get(i32Ty, 16));
- const llvm::Type *DblPtrTy =
- llvm::PointerType::getUnqual(DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
- DoubleTy, NULL);
- llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
- }
- }
-
- // AMD64-ABI 3.5.7p5: Step 5. Set:
- // l->gp_offset = l->gp_offset + num_gp * 8
- // l->fp_offset = l->fp_offset + num_fp * 16.
- if (neededInt) {
- llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
- CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
- gp_offset_p);
- }
- if (neededSSE) {
- llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
- CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
- fp_offset_p);
- }
- CGF.EmitBranch(ContBlock);
-
- // Emit code to load the value if it was passed in memory.
-
- CGF.EmitBlock(InMemBlock);
- llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
-
- // Return the appropriate result.
-
- CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
- "vaarg.addr");
- ResAddr->reserveOperandSpace(2);
- ResAddr->addIncoming(RegAddr, InRegBlock);
- ResAddr->addIncoming(MemAddr, InMemBlock);
-
- return ResAddr;
-}
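-
-// Explanatory note (added): the bounds above fall out of the register save
-// area layout; 48 = 6 GPRs * 8 bytes and 176 = 48 + 8 XMM registers * 16
-// bytes, with gp_offset and fp_offset both indexing into reg_save_area.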
-
-// PIC16 ABI Implementation
-
-namespace {
-
-class PIC16ABIInfo : public ABIInfo {
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
- VMContext);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context, VMContext);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-}
-
-ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else {
- return ABIArgInfo::getDirect();
- }
-}
-
-ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- return ABIArgInfo::getDirect();
-}
-
-llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- return 0;
-}
-
-// ARM ABI Implementation
-
-namespace {
-
-class ARMABIInfo : public ABIInfo {
-public:
- enum ABIKind {
- APCS = 0,
- AAPCS = 1,
- AAPCS_VFP
- };
-
-private:
- ABIKind Kind;
-
-public:
- ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}
-
-private:
- ABIKind getABIKind() const { return Kind; }
-
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
-                                  llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-}
-
-void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
- VMContext);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- it->info = classifyArgumentType(it->type, Context, VMContext);
- }
-
- // ARM always overrides the calling convention.
- switch (getABIKind()) {
- case APCS:
- FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
- break;
-
- case AAPCS:
- FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
- break;
-
- case AAPCS_VFP:
- FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
- break;
- }
-}
-
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (!CodeGenFunction::hasAggregateLLVMType(Ty))
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-
- // Ignore empty records.
- if (isEmptyRecord(Context, Ty, true))
- return ABIArgInfo::getIgnore();
-
- // FIXME: This is kind of nasty... but there isn't much choice because the ARM
- // backend doesn't support byval.
- // FIXME: This doesn't handle alignment > 64 bits.
- const llvm::Type* ElemTy;
- unsigned SizeRegs;
- if (Context.getTypeAlign(Ty) > 32) {
- ElemTy = llvm::Type::getInt64Ty(VMContext);
- SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
- } else {
- ElemTy = llvm::Type::getInt32Ty(VMContext);
- SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
- }
- std::vector<const llvm::Type*> LLVMFields;
- LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
- const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
- return ABIArgInfo::getCoerce(STy);
-}
-
-static bool isIntegerLikeType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) {
- // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
- // is called integer-like if its size is less than or equal to one word, and
- // the offset of each of its addressable sub-fields is zero.
-
- uint64_t Size = Context.getTypeSize(Ty);
-
- // Check that the type fits in a word.
- if (Size > 32)
- return false;
-
- // FIXME: Handle vector types!
- if (Ty->isVectorType())
- return false;
-
- // Float types are never treated as "integer like".
- if (Ty->isRealFloatingType())
- return false;
-
- // If this is a builtin or pointer type then it is ok.
- if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
- return true;
-
- // Complex types "should" be ok by the definition above, but they are not.
- if (Ty->isAnyComplexType())
- return false;
-
-  // Single-element and zero-sized arrays should be allowed, by the definition
- // above, but they are not.
-
- // Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT) return false;
-
- // Ignore records with flexible arrays.
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
-
- // Check that all sub-fields are at offset 0, and are themselves "integer
- // like".
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- bool HadField = false;
- unsigned idx = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- const FieldDecl *FD = *i;
-
- // Check if this field is at offset 0.
- uint64_t Offset = Layout.getFieldOffset(idx);
- if (Offset != 0) {
- // Allow padding bit-fields, but only if they are all at the end of the
- // structure (despite the wording above, this matches gcc).
- if (FD->isBitField() &&
- !FD->getBitWidth()->EvaluateAsInt(Context).getZExtValue()) {
- for (; i != e; ++i)
- if (!i->isBitField() ||
- i->getBitWidth()->EvaluateAsInt(Context).getZExtValue())
- return false;
-
- // All remaining fields are padding, allow this.
- return true;
- }
-
- return false;
- }
-
- if (!isIntegerLikeType(FD->getType(), Context, VMContext))
- return false;
-
- // Only allow at most one field in a structure. Again this doesn't match the
- // wording above, but follows gcc.
- if (!RD->isUnion()) {
- if (HadField)
- return false;
-
- HadField = true;
- }
- }
-
- return true;
-}
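-
-// Example (illustrative only): struct { char c; } and union { int i; char c; }
-// are integer-like (at most one word, every addressable field at offset
-// zero), while struct { char a, b; } is not, because 'b' sits at a non-zero
-// offset.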
-
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (!CodeGenFunction::hasAggregateLLVMType(RetTy))
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-
- // Are we following APCS?
- if (getABIKind() == APCS) {
- if (isEmptyRecord(Context, RetTy, false))
- return ABIArgInfo::getIgnore();
-
- // Integer like structures are returned in r0.
- if (isIntegerLikeType(RetTy, Context, VMContext)) {
- // Return in the smallest viable integer type.
- uint64_t Size = Context.getTypeSize(RetTy);
- if (Size <= 8)
- return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
- if (Size <= 16)
- return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
- return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
- }
-
- // Otherwise return in memory.
- return ABIArgInfo::getIndirect(0);
- }
-
- // Otherwise this is an AAPCS variant.
-
- if (isEmptyRecord(Context, RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Aggregates <= 4 bytes are returned in r0; other aggregates
- // are returned indirectly.
- uint64_t Size = Context.getTypeSize(RetTy);
- if (Size <= 32) {
- // Return in the smallest viable integer type.
- if (Size <= 8)
- return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
- if (Size <= 16)
- return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
- return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
- }
-
- return ABIArgInfo::getIndirect(0);
-}
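-
-// For example (added note): under the AAPCS variants a 3-byte
-//   struct RGB { char r, g, b; };
-// is returned coerced to i32 in r0, whereas a 6-byte aggregate is larger
-// than 32 bits and takes the indirect path.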
-
-llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // FIXME: Need to handle alignment
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- const llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
-}
-
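What this lowering computes is easier to read as a C model. A sketch under the assumption that the va_list is a single pointer into the argument area, which is how the bitcasts above treat it; note the FIXME: alignment greater than 4 is not handled:

#include <stddef.h>

/* Hypothetical model of the ARM va_arg lowering above. */
static void *arm_va_arg_model(char **ap, size_t size) {
  void *addr = *ap;               /* "ap.cur": address of the current argument */
  *ap += (size + 3) & ~(size_t)3; /* "ap.next": advance by the size rounded    */
                                  /* up to a 4-byte slot                       */
  return addr;
}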
-ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- return ABIArgInfo::getIndirect(0);
- } else {
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
-}
-
-// SystemZ ABI Implementation
-
-namespace {
-
-class SystemZABIInfo : public ABIInfo {
- bool isPromotableIntegerType(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
- Context, VMContext);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context, VMContext);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-}
-
-bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
- // The SystemZ ABI requires all 8-, 16- and 32-bit quantities to be extended.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Bool:
- case BuiltinType::Char_S:
- case BuiltinType::Char_U:
- case BuiltinType::SChar:
- case BuiltinType::UChar:
- case BuiltinType::Short:
- case BuiltinType::UShort:
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- return false;
- }
- return false;
-}
-
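As a hedged reading of the predicate above (the 64-bit register width is an assumption about the target, not stated in this file), the resulting classification is:

// bool, char, short, int (signed or unsigned) -> getExtend() to a full GPR
// long, long long, pointers, float, double    -> getDirect()
// structs, unions, and other aggregates       -> getIndirect(0)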
-llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // FIXME: Implement
- return 0;
-}
-
-ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- return ABIArgInfo::getIndirect(0);
- } else {
- return (isPromotableIntegerType(RetTy) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
-}
-
-ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
- return ABIArgInfo::getIndirect(0);
- } else {
- return (isPromotableIntegerType(Ty) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
-}
-
-ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
- return ABIArgInfo::getIndirect(0);
- } else {
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
-}
-
-llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- return 0;
-}
-
-const ABIInfo &CodeGenTypes::getABIInfo() const {
- if (TheABIInfo)
- return *TheABIInfo;
-
- // For now we just cache the ABIInfo in CodeGenTypes and don't free it.
-
- const llvm::Triple &Triple(getContext().Target.getTriple());
- switch (Triple.getArch()) {
- default:
- return *(TheABIInfo = new DefaultABIInfo);
-
- case llvm::Triple::arm:
- case llvm::Triple::thumb:
- // FIXME: We want to know the float calling convention as well.
- if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
- return *(TheABIInfo = new ARMABIInfo(ARMABIInfo::APCS));
-
- return *(TheABIInfo = new ARMABIInfo(ARMABIInfo::AAPCS));
-
- case llvm::Triple::pic16:
- return *(TheABIInfo = new PIC16ABIInfo());
-
- case llvm::Triple::systemz:
- return *(TheABIInfo = new SystemZABIInfo());
-
- case llvm::Triple::x86:
- switch (Triple.getOS()) {
- case llvm::Triple::Darwin:
- return *(TheABIInfo = new X86_32ABIInfo(Context, true, true));
- case llvm::Triple::Cygwin:
- case llvm::Triple::MinGW32:
- case llvm::Triple::MinGW64:
- case llvm::Triple::AuroraUX:
- case llvm::Triple::DragonFly:
- case llvm::Triple::FreeBSD:
- case llvm::Triple::OpenBSD:
- return *(TheABIInfo = new X86_32ABIInfo(Context, false, true));
-
- default:
- return *(TheABIInfo = new X86_32ABIInfo(Context, false, false));
- }
-
- case llvm::Triple::x86_64:
- return *(TheABIInfo = new X86_64ABIInfo());
- }
-}
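For a few sample triples, the dispatch above resolves as follows (an illustrative mapping, assuming llvm::Triple parses these strings as expected):

// x86_64-unknown-linux-gnu          -> X86_64ABIInfo
// i386-apple-darwin9                -> X86_32ABIInfo(Context, true, true)
// i386-pc-mingw32                   -> X86_32ABIInfo(Context, false, true)
// i386-pc-linux-gnu                 -> X86_32ABIInfo(Context, false, false)
// arm-apple-darwin (ABI "apcs-gnu") -> ARMABIInfo(APCS)
// arm-none-linux-gnueabi            -> ARMABIInfo(AAPCS)
// any other architecture            -> DefaultABIInfo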
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index e5fd47e..4454662 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -257,7 +257,7 @@ class DefaultABIInfo : public ABIInfo {
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- DefaultTargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {};
+ DefaultTargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
};
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -320,7 +320,7 @@ public:
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_32TargetCodeGenInfo(ASTContext &Context, bool d, bool p)
- :TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {};
+ :TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {}
};
}
@@ -619,7 +619,7 @@ public:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_64TargetCodeGenInfo():TargetCodeGenInfo(new X86_64ABIInfo()) {};
+ X86_64TargetCodeGenInfo():TargetCodeGenInfo(new X86_64ABIInfo()) {}
};
}
@@ -1428,7 +1428,7 @@ class PIC16ABIInfo : public ABIInfo {
class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PIC16TargetCodeGenInfo():TargetCodeGenInfo(new PIC16ABIInfo()) {};
+ PIC16TargetCodeGenInfo():TargetCodeGenInfo(new PIC16ABIInfo()) {}
};
}
@@ -1493,7 +1493,7 @@ private:
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
ARMTargetCodeGenInfo(ARMABIInfo::ABIKind K)
- :TargetCodeGenInfo(new ARMABIInfo(K)) {};
+ :TargetCodeGenInfo(new ARMABIInfo(K)) {}
};
}
@@ -1754,7 +1754,7 @@ class SystemZABIInfo : public ABIInfo {
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- SystemZTargetCodeGenInfo():TargetCodeGenInfo(new SystemZABIInfo()) {};
+ SystemZTargetCodeGenInfo():TargetCodeGenInfo(new SystemZABIInfo()) {}
};
}
@@ -1816,7 +1816,7 @@ namespace {
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- MSP430TargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {};
+ MSP430TargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const;
};
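The TargetInfo.cpp hunks above all make the same mechanical fix: dropping a stray semicolon after an empty function body. A minimal illustration (hypothetical class, not from the patch):

struct Example {
  Example() {}     // well-formed: empty constructor body, no trailing ';'
  // Example() {}; // the extra ';' is an empty member declaration, which
                   // strict C++98 compilers reject under -pedantic
};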
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 495b22f..58b7b79 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -34,7 +34,7 @@ namespace clang {
ABIInfo *Info;
public:
// WARNING: Acquires the ownership of ABIInfo.
- TargetCodeGenInfo(ABIInfo *info = 0):Info(info) { };
+ TargetCodeGenInfo(ABIInfo *info = 0):Info(info) { }
virtual ~TargetCodeGenInfo();
/// getABIInfo() - Returns ABI info helper for the target.
@@ -43,7 +43,7 @@ namespace clang {
/// SetTargetAttributes - Provides a convenient hook to handle extra
/// target-specific attributes for the given global.
virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const { };
+ CodeGen::CodeGenModule &M) const { }
};
}
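Per the ownership warning in the constructor above, a subclass is expected to hand the base a heap-allocated ABIInfo, which the base class releases in its virtual destructor. A hedged sketch (SomeABIInfo stands in for any concrete ABIInfo subclass):

class SomeTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SomeTargetCodeGenInfo()
    : TargetCodeGenInfo(new SomeABIInfo()) {} // base class now owns the pointer
};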