author     ed <ed@FreeBSD.org>  2009-06-06 08:21:31 +0000
committer  ed <ed@FreeBSD.org>  2009-06-06 08:21:31 +0000
commit     265c92560db8af7e64dc328cb612076086a62bd1 (patch)
tree       06d57bb7679a2140aef96db7105a0bd5f16a4358 /lib
parent     9e262ca77e924f9d84a864b031a1b931d03c5e38 (diff)
download   FreeBSD-src-265c92560db8af7e64dc328cb612076086a62bd1.zip
           FreeBSD-src-265c92560db8af7e64dc328cb612076086a62bd1.tar.gz
Import clang, at r72995.
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ASTContext.cpp | 74
-rw-r--r--  lib/AST/Decl.cpp | 2
-rw-r--r--  lib/AST/DeclCXX.cpp | 4
-rw-r--r--  lib/AST/DeclObjC.cpp | 22
-rw-r--r--  lib/AST/DeclPrinter.cpp | 46
-rw-r--r--  lib/AST/DeclTemplate.cpp | 46
-rw-r--r--  lib/AST/ExprCXX.cpp | 11
-rw-r--r--  lib/AST/ExprConstant.cpp | 3
-rw-r--r--  lib/AST/Type.cpp | 8
-rw-r--r--  lib/Analysis/CFRefCount.cpp | 25
-rw-r--r--  lib/Basic/TargetInfo.cpp | 1
-rw-r--r--  lib/Basic/Targets.cpp | 17
-rw-r--r--  lib/CodeGen/ABIInfo.h | 33
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 39
-rw-r--r--  lib/CodeGen/CGBlocks.h | 12
-rw-r--r--  lib/CodeGen/CGCXXTemp.cpp | 79
-rw-r--r--  lib/CodeGen/CGCall.cpp | 1389
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 19
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 4
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 12
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 8
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 46
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 2
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 19
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 7
-rw-r--r--  lib/CodeGen/TargetABIInfo.cpp | 1379
-rw-r--r--  lib/Driver/Tools.cpp | 15
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 11
-rw-r--r--  lib/Frontend/PCHReader.cpp | 1
-rw-r--r--  lib/Frontend/PCHWriter.cpp | 2
-rw-r--r--  lib/Frontend/PrintParserCallbacks.cpp | 1
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp | 56
-rw-r--r--  lib/Headers/emmintrin.h | 72
-rw-r--r--  lib/Headers/mmintrin.h | 4
-rw-r--r--  lib/Headers/tmmintrin.h | 6
-rw-r--r--  lib/Headers/xmmintrin.h | 72
-rw-r--r--  lib/Lex/LiteralSupport.cpp | 2
-rw-r--r--  lib/Lex/PPExpressions.cpp | 2
-rw-r--r--  lib/Parse/ParseObjc.cpp | 1
-rw-r--r--  lib/Parse/ParsePragma.cpp | 57
-rw-r--r--  lib/Parse/ParsePragma.h | 9
-rw-r--r--  lib/Parse/ParseStmt.cpp | 8
-rw-r--r--  lib/Parse/Parser.cpp | 6
-rw-r--r--  lib/Sema/CMakeLists.txt | 1
-rw-r--r--  lib/Sema/Sema.h | 45
-rw-r--r--  lib/Sema/SemaDecl.cpp | 73
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 2
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 33
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 26
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 192
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 395
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 34
-rw-r--r--  lib/Sema/SemaTemplateInstantiateExpr.cpp | 3
-rw-r--r--  lib/Sema/SemaTemplateInstantiateStmt.cpp | 8
54 files changed, 2639 insertions(+), 1805 deletions(-)
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index b36d1f3..e6dea7c 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -143,7 +143,7 @@ void ASTContext::InitBuiltinTypes() {
// C99 6.2.5p2.
InitBuiltinType(BoolTy, BuiltinType::Bool);
// C99 6.2.5p3.
- if (Target.isCharSigned())
+ if (LangOpts.CharIsSigned)
InitBuiltinType(CharTy, BuiltinType::Char_S);
else
InitBuiltinType(CharTy, BuiltinType::Char_U);
@@ -613,6 +613,20 @@ void ASTContext::CollectObjCIvars(const ObjCInterfaceDecl *OI,
CollectLocalObjCIvars(this, OI, Fields);
}
+/// ShallowCollectObjCIvars -
+/// Collect all ivars, including those synthesized, in the current class.
+///
+void ASTContext::ShallowCollectObjCIvars(const ObjCInterfaceDecl *OI,
+ llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars,
+ bool CollectSynthesized) {
+ for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
+ E = OI->ivar_end(); I != E; ++I) {
+ Ivars.push_back(*I);
+ }
+ if (CollectSynthesized)
+ CollectSynthesizedIvars(OI, Ivars);
+}
+
void ASTContext::CollectProtocolSynthesizedIvars(const ObjCProtocolDecl *PD,
llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
for (ObjCContainerDecl::prop_iterator I = PD->prop_begin(*this),
@@ -645,6 +659,38 @@ void ASTContext::CollectSynthesizedIvars(const ObjCInterfaceDecl *OI,
}
}
+unsigned ASTContext::CountProtocolSynthesizedIvars(const ObjCProtocolDecl *PD) {
+ unsigned count = 0;
+ for (ObjCContainerDecl::prop_iterator I = PD->prop_begin(*this),
+ E = PD->prop_end(*this); I != E; ++I)
+ if ((*I)->getPropertyIvarDecl())
+ ++count;
+
+ // Also look into nested protocols.
+ for (ObjCProtocolDecl::protocol_iterator P = PD->protocol_begin(),
+ E = PD->protocol_end(); P != E; ++P)
+ count += CountProtocolSynthesizedIvars(*P);
+ return count;
+}
+
+unsigned ASTContext::CountSynthesizedIvars(const ObjCInterfaceDecl *OI)
+{
+ unsigned count = 0;
+ for (ObjCInterfaceDecl::prop_iterator I = OI->prop_begin(*this),
+ E = OI->prop_end(*this); I != E; ++I) {
+ if ((*I)->getPropertyIvarDecl())
+ ++count;
+ }
+ // Also look into interface's protocol list for properties declared
+ // in the protocol and whose ivars are synthesized.
+ for (ObjCInterfaceDecl::protocol_iterator P = OI->protocol_begin(),
+ PE = OI->protocol_end(); P != PE; ++P) {
+ ObjCProtocolDecl *PD = (*P);
+ count += CountProtocolSynthesizedIvars(PD);
+ }
+ return count;
+}
+
/// getInterfaceLayoutImpl - Get or compute information about the
/// layout of the given interface.
///
@@ -664,14 +710,13 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
unsigned FieldCount = D->ivar_size();
// Add in synthesized ivar count if laying out an implementation.
if (Impl) {
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- CollectSynthesizedIvars(D, Ivars);
- FieldCount += Ivars.size();
+ unsigned SynthCount = CountSynthesizedIvars(D);
+ FieldCount += SynthCount;
// If there aren't any synthesized ivars then reuse the interface
// entry. Note we can't cache this because we simply free all
// entries later; however we shouldn't look up implementations
// frequently.
- if (FieldCount == D->ivar_size())
+ if (SynthCount == 0)
return getObjCLayout(D, 0);
}
@@ -701,20 +746,11 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
// Layout each ivar sequentially.
unsigned i = 0;
- for (ObjCInterfaceDecl::ivar_iterator IVI = D->ivar_begin(),
- IVE = D->ivar_end(); IVI != IVE; ++IVI) {
- const ObjCIvarDecl* Ivar = (*IVI);
- NewEntry->LayoutField(Ivar, i++, false, StructPacking, *this);
- }
- // And synthesized ivars, if this is an implementation.
- if (Impl) {
- // FIXME. Do we need to colltect twice?
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- CollectSynthesizedIvars(D, Ivars);
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
- NewEntry->LayoutField(Ivars[k], i++, false, StructPacking, *this);
- }
-
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ ShallowCollectObjCIvars(D, Ivars, Impl);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ NewEntry->LayoutField(Ivars[k], i++, false, StructPacking, *this);
+
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
NewEntry->FinalizeLayout();
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index cb3ec1f..dfec106 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -489,7 +489,7 @@ void FunctionDecl::setParams(ASTContext& C, ParmVarDecl **NewParamInfo,
unsigned FunctionDecl::getMinRequiredArguments() const {
unsigned NumRequiredArgs = getNumParams();
while (NumRequiredArgs > 0
- && getParamDecl(NumRequiredArgs-1)->getDefaultArg())
+ && getParamDecl(NumRequiredArgs-1)->hasDefaultArg())
--NumRequiredArgs;
return NumRequiredArgs;
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 19f8958..94daf48 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -326,7 +326,7 @@ CXXConstructorDecl::isCopyConstructor(ASTContext &Context,
// const volatile X&, and either there are no other parameters
// or else all other parameters have default arguments (8.3.6).
if ((getNumParams() < 1) ||
- (getNumParams() > 1 && getParamDecl(1)->getDefaultArg() == 0))
+ (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()))
return false;
const ParmVarDecl *Param = getParamDecl(0);
@@ -363,7 +363,7 @@ bool CXXConstructorDecl::isConvertingConstructor() const {
return (getNumParams() == 0 &&
getType()->getAsFunctionProtoType()->isVariadic()) ||
(getNumParams() == 1) ||
- (getNumParams() > 1 && getParamDecl(1)->getDefaultArg() != 0);
+ (getNumParams() > 1 && getParamDecl(1)->hasDefaultArg());
}
CXXDestructorDecl *
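The change from checking getDefaultArg() against null to calling hasDefaultArg() matters because a parameter can carry a default argument that is not yet available as an expression, for instance while it is still unparsed inside a class definition or not yet instantiated in a template. A minimal sketch of the kind of declaration affected (hypothetical example, not taken from the patch):

struct X {
  // While the class body is being parsed, the default argument below is
  // still "unparsed": getDefaultArg() is null, but hasDefaultArg() is true,
  // so isCopyConstructor() must already treat this as a copy constructor.
  X(const X &other, int flags = defaultFlags());
  static int defaultFlags();
};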
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index f4bb895..21aefdd 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -42,6 +42,19 @@ void ObjCListBase::set(void *const* InList, unsigned Elts, ASTContext &Ctx) {
// ObjCInterfaceDecl
//===----------------------------------------------------------------------===//
+/// getIvarDecl - This method looks up an ivar in this ContextDecl.
+///
+ObjCIvarDecl *
+ObjCContainerDecl::getIvarDecl(ASTContext &Context, IdentifierInfo *Id) const {
+ lookup_const_iterator Ivar, IvarEnd;
+ for (llvm::tie(Ivar, IvarEnd) = lookup(Context, Id);
+ Ivar != IvarEnd; ++Ivar) {
+ if (ObjCIvarDecl *ivar = dyn_cast<ObjCIvarDecl>(*Ivar))
+ return ivar;
+ }
+ return 0;
+}
+
// Get the local instance method declared in this interface.
ObjCMethodDecl *
ObjCContainerDecl::getInstanceMethod(ASTContext &Context, Selector Sel) const {
@@ -139,12 +152,9 @@ ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(
ASTContext &Context, IdentifierInfo *ID, ObjCInterfaceDecl *&clsDeclared) {
ObjCInterfaceDecl* ClassDecl = this;
while (ClassDecl != NULL) {
- for (ivar_iterator I = ClassDecl->ivar_begin(), E = ClassDecl->ivar_end();
- I != E; ++I) {
- if ((*I)->getIdentifier() == ID) {
- clsDeclared = ClassDecl;
- return *I;
- }
+ if (ObjCIvarDecl *I = ClassDecl->getIvarDecl(Context, ID)) {
+ clsDeclared = ClassDecl;
+ return I;
}
// look into properties.
for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(Context),
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index f29da8b..f231abf 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -508,8 +508,50 @@ void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
}
void DeclPrinter::VisitTemplateDecl(TemplateDecl *D) {
- // TODO: Write template parameters.
- Out << "template <...> ";
+ Out << "template <";
+
+ TemplateParameterList *Params = D->getTemplateParameters();
+ for (unsigned i = 0, e = Params->size(); i != e; ++i) {
+ if (i != 0)
+ Out << ", ";
+
+ const Decl *Param = Params->getParam(i);
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(Param)) {
+
+ QualType ParamType =
+ Context.getTypeDeclType(const_cast<TemplateTypeParmDecl*>(TTP));
+
+ if (TTP->wasDeclaredWithTypename())
+ Out << "typename ";
+ else
+ Out << "class ";
+
+ Out << ParamType.getAsString(Policy);
+
+ if (TTP->hasDefaultArgument()) {
+ Out << " = ";
+ Out << TTP->getDefaultArgument().getAsString(Policy);
+ };
+ } else if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ Out << NTTP->getType().getAsString(Policy);
+
+ if (IdentifierInfo *Name = NTTP->getIdentifier()) {
+ Out << ' ';
+ Out << Name->getName();
+ }
+
+ if (NTTP->hasDefaultArgument()) {
+ Out << " = ";
+ NTTP->getDefaultArgument()->printPretty(Out, Context, 0, Policy,
+ Indentation);
+ }
+ }
+ }
+
+ Out << "> ";
+
Visit(D->getTemplatedDecl());
}
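With the parameter list now printed, a class template such as the following hypothetical input is rendered with its real parameters instead of the old "template <...>" placeholder:

// Hypothetical input declaration:
template <typename T, int N = 10>
struct Buffer { T data[N]; };

// Old printer output:  template <...> struct Buffer { ... }
// New printer output:  template <typename T, int N = 10> struct Buffer { ... }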
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index f38ee82..a534164 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -236,28 +236,42 @@ TemplateArgument::TemplateArgument(Expr *E) : Kind(Expression) {
}
//===----------------------------------------------------------------------===//
+// TemplateArgumentListBuilder Implementation
+//===----------------------------------------------------------------------===//
+void TemplateArgumentListBuilder::push_back(const TemplateArgument& Arg) {
+ switch (Arg.getKind()) {
+ default: break;
+ case TemplateArgument::Type:
+ assert(Arg.getAsType()->isCanonical() && "Type must be canonical!");
+ break;
+ }
+
+ Args.push_back(Arg);
+}
+
+//===----------------------------------------------------------------------===//
// TemplateArgumentList Implementation
//===----------------------------------------------------------------------===//
TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
- TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
- bool CopyArgs)
- : NumArguments(NumTemplateArgs) {
+ TemplateArgumentListBuilder &Builder,
+ bool CopyArgs, bool FlattenArgs)
+ : NumArguments(Builder.flatSize()) {
if (!CopyArgs) {
- Arguments.setPointer(TemplateArgs);
+ Arguments.setPointer(Builder.getFlatArgumentList());
Arguments.setInt(1);
return;
}
- unsigned Size = sizeof(TemplateArgument) * NumTemplateArgs;
+
+ unsigned Size = sizeof(TemplateArgument) * Builder.flatSize();
unsigned Align = llvm::AlignOf<TemplateArgument>::Alignment;
void *Mem = Context.Allocate(Size, Align);
Arguments.setPointer((TemplateArgument *)Mem);
Arguments.setInt(0);
TemplateArgument *Args = (TemplateArgument *)Mem;
- for (unsigned I = 0; I != NumTemplateArgs; ++I)
- new (Args + I) TemplateArgument(TemplateArgs[I]);
+ for (unsigned I = 0; I != NumArguments; ++I)
+ new (Args + I) TemplateArgument(Builder.getFlatArgumentList()[I]);
}
TemplateArgumentList::~TemplateArgumentList() {
@@ -271,8 +285,7 @@ ClassTemplateSpecializationDecl::
ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK,
DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs)
+ TemplateArgumentListBuilder &Builder)
: CXXRecordDecl(DK,
SpecializedTemplate->getTemplatedDecl()->getTagKind(),
DC, L,
@@ -280,7 +293,7 @@ ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK,
// class template specializations?
SpecializedTemplate->getIdentifier()),
SpecializedTemplate(SpecializedTemplate),
- TemplateArgs(Context, TemplateArgs, NumTemplateArgs, /*CopyArgs=*/true),
+ TemplateArgs(Context, Builder, /*CopyArgs=*/true, /*FlattenArgs=*/true),
SpecializationKind(TSK_Undeclared) {
}
@@ -288,16 +301,14 @@ ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::Create(ASTContext &Context,
DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs,
+ TemplateArgumentListBuilder &Builder,
ClassTemplateSpecializationDecl *PrevDecl) {
ClassTemplateSpecializationDecl *Result
= new (Context)ClassTemplateSpecializationDecl(Context,
ClassTemplateSpecialization,
DC, L,
SpecializedTemplate,
- TemplateArgs,
- NumTemplateArgs);
+ Builder);
Context.getTypeDeclType(Result, PrevDecl);
return Result;
}
@@ -310,14 +321,13 @@ ClassTemplatePartialSpecializationDecl::
Create(ASTContext &Context, DeclContext *DC, SourceLocation L,
TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate,
- TemplateArgument *TemplateArgs, unsigned NumTemplateArgs,
+ TemplateArgumentListBuilder &Builder,
ClassTemplatePartialSpecializationDecl *PrevDecl) {
ClassTemplatePartialSpecializationDecl *Result
= new (Context)ClassTemplatePartialSpecializationDecl(Context,
DC, L, Params,
SpecializedTemplate,
- TemplateArgs,
- NumTemplateArgs);
+ Builder);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
Context.getTypeDeclType(Result, PrevDecl);
return Result;
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 4a15245..8fd66a2 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -305,10 +305,11 @@ void CXXConstructExpr::Destroy(ASTContext &C) {
CXXExprWithTemporaries::CXXExprWithTemporaries(Expr *subexpr,
CXXTemporary **temps,
- unsigned numtemps)
+ unsigned numtemps,
+ bool destroytemps)
: Expr(CXXExprWithTemporariesClass, subexpr->getType(),
subexpr->isTypeDependent(), subexpr->isValueDependent()),
- SubExpr(subexpr), Temps(0), NumTemps(numtemps) {
+ SubExpr(subexpr), Temps(0), NumTemps(numtemps), DestroyTemps(destroytemps) {
if (NumTemps > 0) {
Temps = new CXXTemporary*[NumTemps];
for (unsigned i = 0; i < NumTemps; ++i)
@@ -319,8 +320,10 @@ CXXExprWithTemporaries::CXXExprWithTemporaries(Expr *subexpr,
CXXExprWithTemporaries *CXXExprWithTemporaries::Create(ASTContext &C,
Expr *SubExpr,
CXXTemporary **Temps,
- unsigned NumTemps) {
- return new (C) CXXExprWithTemporaries(SubExpr, Temps, NumTemps);
+ unsigned NumTemps,
+ bool DestroyTemps) {
+ return new (C) CXXExprWithTemporaries(SubExpr, Temps, NumTemps,
+ DestroyTemps);
}
void CXXExprWithTemporaries::Destroy(ASTContext &C) {
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 50fdcfd..8e3c3ce 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -946,7 +946,8 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
const QualType ElementType = Type->getAsPointerType()->getPointeeType();
uint64_t D = LHSValue.getLValueOffset() - RHSValue.getLValueOffset();
- D /= Info.Ctx.getTypeSize(ElementType) / 8;
+ if (!ElementType->isVoidType() && !ElementType->isFunctionType())
+ D /= Info.Ctx.getTypeSize(ElementType) / 8;
return Success(D, E);
}
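The new guard avoids a division by zero: under the GNU extension that permits arithmetic on void* and function pointers, the pointee is treated as one byte, and getTypeSize() is 0 for void, so the old unconditional division could fault. A hypothetical folding case that reaches this path:

// GNU extension: void* arithmetic treats the element size as 1 byte.
// Previously the evaluator divided by getTypeSize(void)/8 == 0 here.
extern char buf[16];
static long d = (void *)(buf + 8) - (void *)buf; // folds to 8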
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index f573744..e304f54 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -1049,6 +1049,10 @@ TemplateSpecializationType::
anyDependentTemplateArguments(const TemplateArgument *Args, unsigned NumArgs) {
for (unsigned Idx = 0; Idx < NumArgs; ++Idx) {
switch (Args[Idx].getKind()) {
+ case TemplateArgument::Null:
+ assert(false && "Should not have a NULL template argument");
+ break;
+
case TemplateArgument::Type:
if (Args[Idx].getAsType()->isDependentType())
return true;
@@ -1451,6 +1455,10 @@ TemplateSpecializationType::PrintTemplateArgumentList(
// Print the argument into a string.
std::string ArgString;
switch (Args[Arg].getKind()) {
+ case TemplateArgument::Null:
+ assert(false && "Null template argument");
+ break;
+
case TemplateArgument::Type:
Args[Arg].getAsType().getAsStringInternal(ArgString, Policy);
break;
diff --git a/lib/Analysis/CFRefCount.cpp b/lib/Analysis/CFRefCount.cpp
index 30ff67f..532d16d 100644
--- a/lib/Analysis/CFRefCount.cpp
+++ b/lib/Analysis/CFRefCount.cpp
@@ -636,7 +636,11 @@ class VISIBILITY_HIDDEN RetainSummaryManager {
/// ObjCAllocRetE - Default return effect for methods returning Objective-C
/// objects.
RetEffect ObjCAllocRetE;
-
+
+ /// ObjCInitRetE - Default return effect for init methods returning Objective-C
+ /// objects.
+ RetEffect ObjCInitRetE;
+
RetainSummary DefaultSummary;
RetainSummary* StopSummary;
@@ -776,6 +780,8 @@ public:
GCEnabled(gcenabled), AF(BPAlloc), ScratchArgs(AF.GetEmptyMap()),
ObjCAllocRetE(gcenabled ? RetEffect::MakeGCNotOwned()
: RetEffect::MakeOwned(RetEffect::ObjC, true)),
+ ObjCInitRetE(gcenabled ? RetEffect::MakeGCNotOwned()
+ : RetEffect::MakeOwnedWhenTrackedReceiver()),
DefaultSummary(AF.GetEmptyMap() /* per-argument effects (none) */,
RetEffect::MakeNoRet() /* return effect */,
MayEscape, /* default argument effect */
@@ -1156,8 +1162,7 @@ RetainSummaryManager::getInitMethodSummary(QualType RetTy) {
// 'init' methods conceptually return a newly allocated object and claim
// the receiver.
if (isTrackedObjCObjectType(RetTy) || isTrackedCFObjectType(RetTy))
- return getPersistentSummary(RetEffect::MakeOwnedWhenTrackedReceiver(),
- DecRefMsg);
+ return getPersistentSummary(ObjCInitRetE, DecRefMsg);
return getDefaultSummary();
}
@@ -1168,12 +1173,19 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
if (!FD)
return;
+ QualType RetTy = FD->getResultType();
+
// Determine if there is a special return effect for this method.
- if (isTrackedObjCObjectType(FD->getResultType())) {
+ if (isTrackedObjCObjectType(RetTy)) {
if (FD->getAttr<NSReturnsRetainedAttr>()) {
Summ.setRetEffect(ObjCAllocRetE);
}
- else if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ else if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ }
+ }
+ else if (RetTy->getAsPointerType()) {
+ if (FD->getAttr<CFReturnsRetainedAttr>()) {
Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
}
}
@@ -1367,8 +1379,7 @@ void RetainSummaryManager::InitializeMethodSummaries() {
// Create the "init" selector. It just acts as a pass-through for the
// receiver.
addNSObjectMethSummary(GetNullarySelector("init", Ctx),
- getPersistentSummary(RetEffect::MakeOwnedWhenTrackedReceiver(),
- DecRefMsg));
+ getPersistentSummary(ObjCInitRetE, DecRefMsg));
// The next methods are allocators.
RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index 1e8ca2b..a513cb1 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -22,7 +22,6 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
// Set defaults. Defaults are set for a 32-bit RISC platform,
// like PPC or SPARC.
// These should be overridden by concrete targets as needed.
- CharIsSigned = true;
TLSSupported = true;
PointerWidth = PointerAlign = 32;
WCharWidth = WCharAlign = 32;
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 4b94bcf..120d525 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -182,6 +182,11 @@ static void getDarwinDefines(std::vector<char> &Defs, const LangOptions &Opts) {
Define(Defs, "__strong", "");
else
Define(Defs, "__strong", "__attribute__((objc_gc(strong)))");
+
+ if (Opts.Static)
+ Define(Defs, "__STATIC__");
+ else
+ Define(Defs, "__DYNAMIC__");
}
static void getDarwinOSXDefines(std::vector<char> &Defs, const char *Triple) {
@@ -252,9 +257,8 @@ class PPCTargetInfo : public TargetInfo {
static const TargetInfo::GCCRegAlias GCCRegAliases[];
public:
- PPCTargetInfo(const std::string& triple) : TargetInfo(triple) {
- CharIsSigned = false;
- }
+ PPCTargetInfo(const std::string& triple) : TargetInfo(triple) {}
+
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
Records = BuiltinInfo;
@@ -294,6 +298,10 @@ public:
return true;
}
}
+ virtual void getDefaultLangOptions(LangOptions &Opts) {
+ TargetInfo::getDefaultLangOptions(Opts);
+ Opts.CharIsSigned = false;
+ }
virtual const char *getClobbers() const {
return "";
}
@@ -444,6 +452,7 @@ public:
/// various language options. These may be overridden by command line
/// options.
virtual void getDefaultLangOptions(LangOptions &Opts) {
+ PPC32TargetInfo::getDefaultLangOptions(Opts);
GetDarwinLanguageOptions(Opts, getTargetTriple());
}
};
@@ -464,6 +473,7 @@ public:
/// various language options. These may be overridden by command line
/// options.
virtual void getDefaultLangOptions(LangOptions &Opts) {
+ PPC64TargetInfo::getDefaultLangOptions(Opts);
GetDarwinLanguageOptions(Opts, getTargetTriple());
}
};
@@ -840,6 +850,7 @@ public:
/// various language options. These may be overridden by command line
/// options.
virtual void getDefaultLangOptions(LangOptions &Opts) {
+ X86_32TargetInfo::getDefaultLangOptions(Opts);
GetDarwinLanguageOptions(Opts, getTargetTriple());
}
};
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 3de4612..44af0c4 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -10,8 +10,13 @@
#ifndef CLANG_CODEGEN_ABIINFO_H
#define CLANG_CODEGEN_ABIINFO_H
+#include "clang/AST/Type.h"
+
+#include <cassert>
+
namespace llvm {
class Type;
+ class Value;
}
namespace clang {
@@ -38,32 +43,32 @@ namespace clang {
Direct, /// Pass the argument directly using the normal
/// converted LLVM type. Complex and structure types
/// are passed using first class aggregates.
-
+
Indirect, /// Pass the argument indirectly via a hidden pointer
/// with the specified alignment (0 indicates default
/// alignment).
-
+
Ignore, /// Ignore the argument (treat as void). Useful for
/// void and empty structs.
-
+
Coerce, /// Only valid for aggregate return types, the argument
/// should be accessed by coercion to a provided type.
-
+
Expand, /// Only valid for aggregate argument types. The
/// structure should be expanded into consecutive
/// arguments for its constituent fields. Currently
/// expand is only allowed on structures whose fields
/// are all scalar types or are themselves expandable
/// types.
-
+
KindFirst=Direct, KindLast=Expand
};
-
+
private:
Kind TheKind;
const llvm::Type *TypeData;
unsigned UIntData;
-
+
ABIArgInfo(Kind K, const llvm::Type *TD=0,
unsigned UI=0) : TheKind(K),
TypeData(TD),
@@ -71,13 +76,13 @@ namespace clang {
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
- static ABIArgInfo getDirect() {
- return ABIArgInfo(Direct);
+ static ABIArgInfo getDirect() {
+ return ABIArgInfo(Direct);
}
static ABIArgInfo getIgnore() {
return ABIArgInfo(Ignore);
}
- static ABIArgInfo getCoerce(const llvm::Type *T) {
+ static ABIArgInfo getCoerce(const llvm::Type *T) {
return ABIArgInfo(Coerce, T);
}
static ABIArgInfo getIndirect(unsigned Alignment) {
@@ -86,20 +91,20 @@ namespace clang {
static ABIArgInfo getExpand() {
return ABIArgInfo(Expand);
}
-
+
Kind getKind() const { return TheKind; }
bool isDirect() const { return TheKind == Direct; }
bool isIgnore() const { return TheKind == Ignore; }
bool isCoerce() const { return TheKind == Coerce; }
bool isIndirect() const { return TheKind == Indirect; }
bool isExpand() const { return TheKind == Expand; }
-
+
// Coerce accessors
const llvm::Type *getCoerceToType() const {
assert(TheKind == Coerce && "Invalid kind!");
return TypeData;
}
-
+
// ByVal accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
@@ -120,7 +125,7 @@ namespace clang {
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.
-
+
// FIXME: This is a gaping layering violation if we wanted to drop
// the ABI information any lower than CodeGen. Of course, for
// VAArg handling it has to be at this level; there is no way to
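A sketch of how a target-specific classifier is expected to combine these constructors (illustrative only; the helper below is not part of this header, and the real per-target logic lives in the new TargetABIInfo.cpp):

static ABIArgInfo classifySmallAggregate(uint64_t SizeInBits) {
  if (SizeInBits == 0)
    return ABIArgInfo::getIgnore();      // empty struct: treat as void
  if (SizeInBits <= 64)                  // fits a register: coerce to iN
    return ABIArgInfo::getCoerce(llvm::IntegerType::get(SizeInBits));
  return ABIArgInfo::getIndirect(0);     // pass via hidden pointer
}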
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index ead689c..d5f803b 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -724,6 +724,8 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
+ // FIXME: We'd like to put these into a section that is mergeable by
+ // content, with internal linkage.
std::string Name = std::string("__copy_helper_block_");
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
@@ -803,6 +805,8 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
+ // FIXME: We'd like to put these into a section that is mergeable by
+ // content, with internal linkage.
std::string Name = std::string("__destroy_helper_block_");
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
@@ -889,6 +893,8 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ // FIXME: We'd like to put these into a section that is mergeable by
+ // content, with internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
Name,
@@ -950,6 +956,8 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ // FIXME: We'd like to put these into a section that is mergeable by
+ // content, with internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
Name,
@@ -980,13 +988,36 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
}
llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
- int flag) {
- return CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag);
+ int flag, unsigned Align) {
+ // All alignments below that of pointer alignment collapse down to just
+ // pointer alignment, as we always have at least that much alignment to begin
+ // with.
+ Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+ // As an optimization, we only generate a single function of each kind we
+ // might need. We need a different one for each alignment and for each
+ // setting of flags. We mix Align and flag to get the kind.
+ uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag;
+ llvm::Constant *& Entry = CGM.AssignCache[kind];
+ if (Entry)
+ return Entry;
+ return Entry=CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag);
}
llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
- int flag) {
- return CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag);
+ int flag,
+ unsigned Align) {
+ // All alignments below that of pointer alignment collapse down to just
+ // pointer alignment, as we always have at least that much alignment to begin
+ // with.
+ Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+ // As an optimization, we only generate a single function of each kind we
+ // might need. We need a different one for each alignment and for each
+ // setting of flags. We mix Align and flag to get the kind.
+ uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag;
+ llvm::Constant *& Entry = CGM.DestroyCache[kind];
+ if (Entry)
+ return Entry;
+ return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag);
}
llvm::Value *BlockFunction::getBlockObjectDispose() {
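The cache key packs both inputs into one integer: because flag stays below BLOCK_BYREF_CURRENT_MAX (256), kind = Align*256 + flag maps each (Align, flag) pair to a distinct entry. A standalone restatement of the packing (hypothetical helper, same arithmetic as above):

#include <cassert>
#include <cstdint>
enum { BLOCK_BYREF_CURRENT_MAX = 256 };
static uint64_t makeHelperKind(unsigned Align, int flag) {
  assert(flag >= 0 && flag < BLOCK_BYREF_CURRENT_MAX && "flag must fit");
  return (uint64_t)Align * BLOCK_BYREF_CURRENT_MAX + flag; // injective
}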
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 56d3a2d..5d46ac7 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -98,6 +98,9 @@ public:
llvm::Value *BlockObjectDispose;
const llvm::Type *PtrToInt8Ty;
+ std::map<uint64_t, llvm::Constant *> AssignCache;
+ std::map<uint64_t, llvm::Constant *> DestroyCache;
+
BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
CodeGenTypes &T, CodeGenModule &CodeGen)
: Context(C), TheModule(M), TheTargetData(TD), Types(T),
@@ -131,8 +134,9 @@ public:
variable */
BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
helpers */
- BLOCK_BYREF_CALLER = 128 /* called from __block (byref) copy/dispose
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
};
/// BlockInfo - Information to generate a block literal.
@@ -199,8 +203,10 @@ public:
llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);
- llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag);
- llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag);
+ llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag,
+ unsigned Align);
+ llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag,
+ unsigned Align);
llvm::Value *getBlockObjectAssign();
llvm::Value *getBlockObjectDispose();
diff --git a/lib/CodeGen/CGCXXTemp.cpp b/lib/CodeGen/CGCXXTemp.cpp
index d53a56f..141726a 100644
--- a/lib/CodeGen/CGCXXTemp.cpp
+++ b/lib/CodeGen/CGCXXTemp.cpp
@@ -18,8 +18,29 @@ using namespace CodeGen;
void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
llvm::Value *Ptr) {
llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
-
- LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock, 0));
+
+ llvm::Value *CondPtr = 0;
+
+ // Check if temporaries need to be conditional. If so, we'll create a
+ // condition boolean, initialize it to false, and set it to true once the
+ // temporary is constructed.
+ if (!ConditionalTempDestructionStack.empty()) {
+ CondPtr = CreateTempAlloca(llvm::Type::Int1Ty, "cond");
+
+ // Initialize it to false. This initialization takes place right after
+ // the alloca insert point.
+ llvm::StoreInst *SI =
+ new llvm::StoreInst(llvm::ConstantInt::getFalse(), CondPtr);
+ llvm::BasicBlock *Block = AllocaInsertPt->getParent();
+ Block->getInstList().insertAfter((llvm::Instruction *)AllocaInsertPt, SI);
+
+ // Now set it to true.
+ Builder.CreateStore(llvm::ConstantInt::getTrue(), CondPtr);
+ }
+
+ LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
+ CondPtr));
+
+ PushCleanupBlock(DtorBlock);
}
void CodeGenFunction::PopCXXTemporary() {
@@ -35,9 +56,28 @@ void CodeGenFunction::PopCXXTemporary() {
EmitBlock(Info.DtorBlock);
+ llvm::BasicBlock *CondEnd = 0;
+
+ // If this is a conditional temporary, we need to check the condition
+ // boolean and only call the destructor if it's true.
+ if (Info.CondPtr) {
+ llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
+ CondEnd = createBasicBlock("cond.dtor.end");
+
+ llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
+ Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+ EmitBlock(CondBlock);
+ }
+
EmitCXXDestructorCall(Info.Temporary->getDestructor(),
Dtor_Complete, Info.ThisPtr);
+ if (CondEnd) {
+ // Reset the condition to false.
+ Builder.CreateStore(llvm::ConstantInt::getFalse(), Info.CondPtr);
+ EmitBlock(CondEnd);
+ }
+
LiveTemporaries.pop_back();
}
@@ -47,21 +87,38 @@ CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
bool isAggLocVolatile) {
// Keep track of the current cleanup stack depth.
size_t CleanupStackDepth = CleanupEntries.size();
+ (void) CleanupStackDepth;
unsigned OldNumLiveTemporaries = LiveTemporaries.size();
RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, isAggLocVolatile);
- // Go through the temporaries backwards.
- for (unsigned i = E->getNumTemporaries(); i != 0; --i) {
- assert(LiveTemporaries.back().Temporary == E->getTemporary(i - 1));
- LiveTemporaries.pop_back();
- }
+ // Pop temporaries.
+ while (LiveTemporaries.size() > OldNumLiveTemporaries)
+ PopCXXTemporary();
+
+ assert(CleanupEntries.size() == CleanupStackDepth &&
+ "Cleanup size mismatch!");
+
+ return RV;
+}
+
+void
+CodeGenFunction::PushConditionalTempDestruction() {
+ // Store the current number of live temporaries.
+ ConditionalTempDestructionStack.push_back(LiveTemporaries.size());
+}
- assert(OldNumLiveTemporaries == LiveTemporaries.size() &&
- "Live temporary stack mismatch!");
+void CodeGenFunction::PopConditionalTempDestruction() {
+ size_t NumLiveTemporaries = ConditionalTempDestructionStack.back();
+ ConditionalTempDestructionStack.pop_back();
- EmitCleanupBlocks(CleanupStackDepth);
+ // Pop temporaries.
+ while (LiveTemporaries.size() > NumLiveTemporaries) {
+ assert(LiveTemporaries.back().CondPtr &&
+ "Conditional temporary must have a cond ptr!");
- return RV;
+ PopCXXTemporary();
+ }
}
+
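The condition boolean exists because a temporary may be constructed on only some control-flow paths, so its destructor call has to be guarded at run time. A hypothetical source-level case that exercises this machinery:

struct T { T(); ~T(); };
bool use(const T &);
bool g(bool cond) {
  // T() is constructed only when cond is true; the emitted cleanup loads
  // the condition flag and skips ~T() on the false path.
  return cond && use(T());
}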
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index ea0b887..b46e860 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -16,15 +16,12 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
-#include "clang/AST/RecordLayout.h"
-#include "llvm/ADT/StringExtras.h"
+#include "clang/Frontend/CompileOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
-#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetData.h"
#include "ABIInfo.h"
@@ -126,1370 +123,6 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
return *FI;
}
-/***/
-
-ABIInfo::~ABIInfo() {}
-
-void ABIArgInfo::dump() const {
- fprintf(stderr, "(ABIArgInfo Kind=");
- switch (TheKind) {
- case Direct:
- fprintf(stderr, "Direct");
- break;
- case Ignore:
- fprintf(stderr, "Ignore");
- break;
- case Coerce:
- fprintf(stderr, "Coerce Type=");
- getCoerceToType()->print(llvm::errs());
- break;
- case Indirect:
- fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
- break;
- case Expand:
- fprintf(stderr, "Expand");
- break;
- }
- fprintf(stderr, ")\n");
-}
-
-/***/
-
-static bool isEmptyRecord(ASTContext &Context, QualType T);
-
-/// isEmptyField - Return true iff a the field is "empty", that is it
-/// is an unnamed bit-field or an (array of) empty record(s).
-static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
- if (FD->isUnnamedBitfield())
- return true;
-
- QualType FT = FD->getType();
- // Constant arrays of empty records count as empty, strip them off.
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
- FT = AT->getElementType();
-
- return isEmptyRecord(Context, FT);
-}
-
-/// isEmptyRecord - Return true iff a structure contains only empty
-/// fields. Note that a structure with a flexible array member is not
-/// considered empty.
-static bool isEmptyRecord(ASTContext &Context, QualType T) {
- const RecordType *RT = T->getAsRecordType();
- if (!RT)
- return 0;
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
- for (RecordDecl::field_iterator i = RD->field_begin(Context),
- e = RD->field_end(Context); i != e; ++i)
- if (!isEmptyField(Context, *i))
- return false;
- return true;
-}
-
-/// isSingleElementStruct - Determine if a structure is a "single
-/// element struct", i.e. it has exactly one non-empty field or
-/// exactly one field which is itself a single element
-/// struct. Structures with flexible array members are never
-/// considered single element structs.
-///
-/// \return The field declaration for the single non-empty field, if
-/// it exists.
-static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
- const RecordType *RT = T->getAsStructureType();
- if (!RT)
- return 0;
-
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return 0;
-
- const Type *Found = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(Context),
- e = RD->field_end(Context); i != e; ++i) {
- const FieldDecl *FD = *i;
- QualType FT = FD->getType();
-
- // Ignore empty fields.
- if (isEmptyField(Context, FD))
- continue;
-
- // If we already found an element then this isn't a single-element
- // struct.
- if (Found)
- return 0;
-
- // Treat single element arrays as the element.
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() != 1)
- break;
- FT = AT->getElementType();
- }
-
- if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
- Found = FT.getTypePtr();
- } else {
- Found = isSingleElementStruct(FT, Context);
- if (!Found)
- return 0;
- }
- }
-
- return Found;
-}
-
-static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
- if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
- return false;
-
- uint64_t Size = Context.getTypeSize(Ty);
- return Size == 32 || Size == 64;
-}
-
-static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
- ASTContext &Context) {
- for (RecordDecl::field_iterator i = RD->field_begin(Context),
- e = RD->field_end(Context); i != e; ++i) {
- const FieldDecl *FD = *i;
-
- if (!is32Or64BitBasicType(FD->getType(), Context))
- return false;
-
- // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
- // how to expand them yet, and the predicate for telling if a bitfield still
- // counts as "basic" is more complicated than what we were doing previously.
- if (FD->isBitField())
- return false;
- }
-
- return true;
-}
-
-namespace {
-/// DefaultABIInfo - The default implementation for ABI specific
-/// details. This implementation provides information which results in
-/// self-consistent and sensible LLVM IR generation, but does not
-/// conform to any particular ABI.
-class DefaultABIInfo : public ABIInfo {
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-/// X86_32ABIInfo - The X86-32 ABI information.
-class X86_32ABIInfo : public ABIInfo {
- ASTContext &Context;
- bool IsDarwin;
-
- static bool isRegisterSize(unsigned Size) {
- return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
- }
-
- static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
-
-public:
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-
- X86_32ABIInfo(ASTContext &Context, bool d)
- : ABIInfo(), Context(Context), IsDarwin(d) {}
-};
-}
-
-
-/// shouldReturnTypeInRegister - Determine if the given type should be
-/// passed in a register (for the Darwin ABI).
-bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
- ASTContext &Context) {
- uint64_t Size = Context.getTypeSize(Ty);
-
- // Type must be register sized.
- if (!isRegisterSize(Size))
- return false;
-
- if (Ty->isVectorType()) {
- // 64- and 128- bit vectors inside structures are not returned in
- // registers.
- if (Size == 64 || Size == 128)
- return false;
-
- return true;
- }
-
- // If this is a builtin, pointer, or complex type, it is ok.
- if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
- return true;
-
- // Arrays are treated like records.
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
- return shouldReturnTypeInRegister(AT->getElementType(), Context);
-
- // Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAsRecordType();
- if (!RT) return false;
-
- // Structure types are passed in register if all fields would be
- // passed in a register.
- for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
- e = RT->getDecl()->field_end(Context); i != e; ++i) {
- const FieldDecl *FD = *i;
-
- // Empty fields are ignored.
- if (isEmptyField(Context, FD))
- continue;
-
- // Check fields recursively.
- if (!shouldReturnTypeInRegister(FD->getType(), Context))
- return false;
- }
-
- return true;
-}
-
-ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else if (const VectorType *VT = RetTy->getAsVectorType()) {
- // On Darwin, some vectors are returned in registers.
- if (IsDarwin) {
- uint64_t Size = Context.getTypeSize(RetTy);
-
- // 128-bit vectors are a special case; they are returned in
- // registers and we need to make sure to pick a type the LLVM
- // backend will like.
- if (Size == 128)
- return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
- 2));
-
- // Always return in register if it fits in a general purpose
- // register, or if it is 64 bits and has a single element.
- if ((Size == 8 || Size == 16 || Size == 32) ||
- (Size == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
-
- return ABIArgInfo::getIndirect(0);
- }
-
- return ABIArgInfo::getDirect();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- // Structures with flexible arrays are always indirect.
- if (const RecordType *RT = RetTy->getAsStructureType())
- if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(0);
-
- // Outside of Darwin, structs and unions are always indirect.
- if (!IsDarwin && !RetTy->isAnyComplexType())
- return ABIArgInfo::getIndirect(0);
-
- // Classify "single element" structs as their element type.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
- if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
- if (BT->isIntegerType()) {
- // We need to use the size of the structure, padding
- // bit-fields can adjust that to be larger than the single
- // element type.
- uint64_t Size = Context.getTypeSize(RetTy);
- return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
- } else if (BT->getKind() == BuiltinType::Float) {
- assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
- "Unexpect single element structure size!");
- return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
- } else if (BT->getKind() == BuiltinType::Double) {
- assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
- "Unexpect single element structure size!");
- return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
- }
- } else if (SeltTy->isPointerType()) {
- // FIXME: It would be really nice if this could come out as the proper
- // pointer type.
- llvm::Type *PtrTy =
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
- return ABIArgInfo::getCoerce(PtrTy);
- } else if (SeltTy->isVectorType()) {
- // 64- and 128-bit vectors are never returned in a
- // register when inside a structure.
- uint64_t Size = Context.getTypeSize(RetTy);
- if (Size == 64 || Size == 128)
- return ABIArgInfo::getIndirect(0);
-
- return classifyReturnType(QualType(SeltTy, 0), Context);
- }
- }
-
- // Small structures which are register sized are generally returned
- // in a register.
- if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
- uint64_t Size = Context.getTypeSize(RetTy);
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
- }
-
- return ABIArgInfo::getIndirect(0);
- } else {
- return ABIArgInfo::getDirect();
- }
-}
-
-ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context) const {
- // FIXME: Set alignment on indirect arguments.
- if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
- // Structures with flexible arrays are always indirect.
- if (const RecordType *RT = Ty->getAsStructureType())
- if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(0);
-
- // Ignore empty structs.
- uint64_t Size = Context.getTypeSize(Ty);
- if (Ty->isStructureType() && Size == 0)
- return ABIArgInfo::getIgnore();
-
- // Expand structs with size <= 128-bits which consist only of
- // basic types (int, long long, float, double, xxx*). This is
- // non-recursive and does not ignore empty fields.
- if (const RecordType *RT = Ty->getAsStructureType()) {
- if (Context.getTypeSize(Ty) <= 4*32 &&
- areAllFields32Or64BitBasicType(RT->getDecl(), Context))
- return ABIArgInfo::getExpand();
- }
-
- return ABIArgInfo::getIndirect(0);
- } else {
- return ABIArgInfo::getDirect();
- }
-}
-
-llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr,
- llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
-}
-
-namespace {
-/// X86_64ABIInfo - The X86_64 ABI information.
-class X86_64ABIInfo : public ABIInfo {
- enum Class {
- Integer = 0,
- SSE,
- SSEUp,
- X87,
- X87Up,
- ComplexX87,
- NoClass,
- Memory
- };
-
- /// merge - Implement the X86_64 ABI merging algorithm.
- ///
- /// Merge an accumulating classification \arg Accum with a field
- /// classification \arg Field.
- ///
- /// \param Accum - The accumulating classification. This should
- /// always be either NoClass or the result of a previous merge
- /// call. In addition, this should never be Memory (the caller
- /// should just return Memory for the aggregate).
- Class merge(Class Accum, Class Field) const;
-
- /// classify - Determine the x86_64 register classes in which the
- /// given type T should be passed.
- ///
- /// \param Lo - The classification for the parts of the type
- /// residing in the low word of the containing object.
- ///
- /// \param Hi - The classification for the parts of the type
- /// residing in the high word of the containing object.
- ///
- /// \param OffsetBase - The bit offset of this type in the
- /// containing object. Some parameters are classified different
- /// depending on whether they straddle an eightbyte boundary.
- ///
- /// If a word is unused its result will be NoClass; if a type should
- /// be passed in Memory then at least the classification of \arg Lo
- /// will be Memory.
- ///
- /// The \arg Lo class will be NoClass iff the argument is ignored.
- ///
- /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
- /// also be ComplexX87.
- void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
- Class &Lo, Class &Hi) const;
-
- /// getCoerceResult - Given a source type \arg Ty and an LLVM type
- /// to coerce to, chose the best way to pass Ty in the same place
- /// that \arg CoerceTo would be passed, but while keeping the
- /// emitted code as simple as possible.
- ///
- /// FIXME: Note, this should be cleaned up to just take an enumeration of all
- /// the ways we might want to pass things, instead of constructing an LLVM
- /// type. This makes this code more explicit, and it makes it clearer that we
- /// are also doing this for correctness in the case of passing scalar types.
- ABIArgInfo getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo,
- ASTContext &Context) const;
-
- /// getIndirectResult - Give a source type \arg Ty, return a suitable result
- /// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty,
- ASTContext &Context) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context) const;
-
- ABIArgInfo classifyArgumentType(QualType Ty,
- ASTContext &Context,
- unsigned &neededInt,
- unsigned &neededSSE) const;
-
-public:
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-}
-
-X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
- Class Field) const {
- // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
- // classified recursively so that always two fields are
- // considered. The resulting class is calculated according to
- // the classes of the fields in the eightbyte:
- //
- // (a) If both classes are equal, this is the resulting class.
- //
- // (b) If one of the classes is NO_CLASS, the resulting class is
- // the other class.
- //
- // (c) If one of the classes is MEMORY, the result is the MEMORY
- // class.
- //
- // (d) If one of the classes is INTEGER, the result is the
- // INTEGER.
- //
- // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
- // MEMORY is used as class.
- //
- // (f) Otherwise class SSE is used.
-
- // Accum should never be memory (we should have returned) or
- // ComplexX87 (because this cannot be passed in a structure).
- assert((Accum != Memory && Accum != ComplexX87) &&
- "Invalid accumulated classification during merge.");
- if (Accum == Field || Field == NoClass)
- return Accum;
- else if (Field == Memory)
- return Memory;
- else if (Accum == NoClass)
- return Field;
- else if (Accum == Integer || Field == Integer)
- return Integer;
- else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
- Accum == X87 || Accum == X87Up)
- return Memory;
- else
- return SSE;
-}
-
-void X86_64ABIInfo::classify(QualType Ty,
- ASTContext &Context,
- uint64_t OffsetBase,
- Class &Lo, Class &Hi) const {
- // FIXME: This code can be simplified by introducing a simple value class for
- // Class pairs with appropriate constructor methods for the various
- // situations.
-
- // FIXME: Some of the split computations are wrong; unaligned vectors
- // shouldn't be passed in registers for example, so there is no chance they
- // can straddle an eightbyte. Verify & simplify.
-
- Lo = Hi = NoClass;
-
- Class &Current = OffsetBase < 64 ? Lo : Hi;
- Current = Memory;
-
- if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
- BuiltinType::Kind k = BT->getKind();
-
- if (k == BuiltinType::Void) {
- Current = NoClass;
- } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
- Lo = Integer;
- Hi = Integer;
- } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
- Current = Integer;
- } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
- Current = SSE;
- } else if (k == BuiltinType::LongDouble) {
- Lo = X87;
- Hi = X87Up;
- }
- // FIXME: _Decimal32 and _Decimal64 are SSE.
- // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
- } else if (const EnumType *ET = Ty->getAsEnumType()) {
- // Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
- } else if (Ty->hasPointerRepresentation()) {
- Current = Integer;
- } else if (const VectorType *VT = Ty->getAsVectorType()) {
- uint64_t Size = Context.getTypeSize(VT);
- if (Size == 32) {
- // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
- // float> as integer.
- Current = Integer;
-
- // If this type crosses an eightbyte boundary, it should be
- // split.
- uint64_t EB_Real = (OffsetBase) / 64;
- uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
- if (EB_Real != EB_Imag)
- Hi = Lo;
- } else if (Size == 64) {
- // gcc passes <1 x double> in memory. :(
- if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
- return;
-
- // gcc passes <1 x long long> as INTEGER.
- if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
- Current = Integer;
- else
- Current = SSE;
-
- // If this type crosses an eightbyte boundary, it should be
- // split.
- if (OffsetBase && OffsetBase != 64)
- Hi = Lo;
- } else if (Size == 128) {
- Lo = SSE;
- Hi = SSEUp;
- }
- } else if (const ComplexType *CT = Ty->getAsComplexType()) {
- QualType ET = Context.getCanonicalType(CT->getElementType());
-
- uint64_t Size = Context.getTypeSize(Ty);
- if (ET->isIntegralType()) {
- if (Size <= 64)
- Current = Integer;
- else if (Size <= 128)
- Lo = Hi = Integer;
- } else if (ET == Context.FloatTy)
- Current = SSE;
- else if (ET == Context.DoubleTy)
- Lo = Hi = SSE;
- else if (ET == Context.LongDoubleTy)
- Current = ComplexX87;
-
- // If this complex type crosses an eightbyte boundary then it
- // should be split.
- uint64_t EB_Real = (OffsetBase) / 64;
- uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
- if (Hi == NoClass && EB_Real != EB_Imag)
- Hi = Lo;
- } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
- // Arrays are treated like structures.
-
- uint64_t Size = Context.getTypeSize(Ty);
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
- return;
-
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
- //
- // Only need to check alignment of array base.
- if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
- return;
-
- // Otherwise implement simplified merge. We could be smarter about
- // this, but it isn't worth it and would be harder to verify.
- Current = NoClass;
- uint64_t EltSize = Context.getTypeSize(AT->getElementType());
- uint64_t ArraySize = AT->getSize().getZExtValue();
- for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
- Class FieldLo, FieldHi;
- classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
- // Do post merger cleanup (see below). Only case we worry about is Memory.
- if (Hi == Memory)
- Lo = Memory;
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
- } else if (const RecordType *RT = Ty->getAsRecordType()) {
- uint64_t Size = Context.getTypeSize(Ty);
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
- return;
-
- const RecordDecl *RD = RT->getDecl();
-
- // Assume variable sized types are passed in memory.
- if (RD->hasFlexibleArrayMember())
- return;
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // Reset Lo class, this will be recomputed.
- Current = NoClass;
- unsigned idx = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(Context),
- e = RD->field_end(Context); i != e; ++i, ++idx) {
- uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- bool BitField = i->isBitField();
-
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
- //
- // Note, skip this test for bit-fields, see below.
- if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
- Lo = Memory;
- return;
- }
-
- // Classify this field.
- //
- // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
- // exceeds a single eightbyte, each is classified
- // separately. Each eightbyte gets initialized to class
- // NO_CLASS.
- Class FieldLo, FieldHi;
-
- // Bit-fields require special handling, they do not force the
- // structure to be passed in memory even if unaligned, and
- // therefore they can straddle an eightbyte.
- if (BitField) {
- // Ignore padding bit-fields.
- if (i->isUnnamedBitfield())
- continue;
-
- uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
-
- uint64_t EB_Lo = Offset / 64;
- uint64_t EB_Hi = (Offset + Size - 1) / 64;
- FieldLo = FieldHi = NoClass;
- if (EB_Lo) {
- assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
- FieldLo = NoClass;
- FieldHi = Integer;
- } else {
- FieldLo = Integer;
- FieldHi = EB_Hi ? Integer : NoClass;
- }
- } else
- classify(i->getType(), Context, Offset, FieldLo, FieldHi);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
- // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
- //
- // (a) If one of the classes is MEMORY, the whole argument is
- // passed in memory.
- //
- // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
-
- // The first of these conditions is guaranteed by how we implement
- // the merge (just bail).
- //
- // The second condition occurs in the case of unions; for example
- // union { _Complex double; unsigned; }.
- if (Hi == Memory)
- Lo = Memory;
- if (Hi == SSEUp && Lo != SSE)
- Hi = SSE;
- }
-}
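
For illustration, here are a few C types and the {Lo, Hi} class pairs the algorithm above should assign them, read off from the AMD64-ABI rules quoted in the comments (a sketch, not output from running clang; the 16-byte size of S1 assumes an LP64 target):

    struct S1 { long a; double d; };   // {Integer, SSE}: one GPR plus one XMM eightbyte
    struct S2 { long double ld; };     // {X87, X87Up}: the x87 stack
    struct S3 { char c[24]; };         // {Memory}: larger than two eightbytes
    struct S4 { int a; int b; };       // {Integer, NoClass}: fits in one eightbyte
    // _Complex double               -> {SSE, SSE}: real and imaginary eightbytes

    int main() { return sizeof(S1) == 16 ? 0 : 1; }  // 16 bytes on LP64 (assumption)
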
-
-ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo,
- ASTContext &Context) const {
- if (CoerceTo == llvm::Type::Int64Ty) {
- // Integer and pointer types will end up in a general purpose
- // register.
- if (Ty->isIntegralType() || Ty->isPointerType())
- return ABIArgInfo::getDirect();
-
- } else if (CoerceTo == llvm::Type::DoubleTy) {
- // FIXME: It would probably be better to make CGFunctionInfo only map using
- // canonical types than to canonize here.
- QualType CTy = Context.getCanonicalType(Ty);
-
- // Float and double end up in a single SSE reg.
- if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
- return ABIArgInfo::getDirect();
-
- }
-
- return ABIArgInfo::getCoerce(CoerceTo);
-}
-
-ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
- ASTContext &Context) const {
- // If this is a scalar LLVM value then assume LLVM will pass it in the right
- // place naturally.
- if (!CodeGenFunction::hasAggregateLLVMType(Ty))
- return ABIArgInfo::getDirect();
-
- // FIXME: Set alignment correctly.
- return ABIArgInfo::getIndirect(0);
-}
-
-ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context) const {
- // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
- // classification algorithm.
- X86_64ABIInfo::Class Lo, Hi;
- classify(RetTy, Context, 0, Lo, Hi);
-
- // Check some invariants.
- assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
-
- const llvm::Type *ResType = 0;
- switch (Lo) {
- case NoClass:
- return ABIArgInfo::getIgnore();
-
- case SSEUp:
- case X87Up:
- assert(0 && "Invalid classification for lo word.");
-
- // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
- // hidden argument.
- case Memory:
- return getIndirectResult(RetTy, Context);
-
- // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
- // available register of the sequence %rax, %rdx is used.
- case Integer:
- ResType = llvm::Type::Int64Ty; break;
-
- // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
- // available SSE register of the sequence %xmm0, %xmm1 is used.
- case SSE:
- ResType = llvm::Type::DoubleTy; break;
-
- // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
- // returned on the X87 stack in %st0 as 80-bit x87 number.
- case X87:
- ResType = llvm::Type::X86_FP80Ty; break;
-
- // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
- // part of the value is returned in %st0 and the imaginary part in
- // %st1.
- case ComplexX87:
- assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
- llvm::Type::X86_FP80Ty,
- NULL);
- break;
- }
-
- switch (Hi) {
- // Memory was handled previously and X87 should
- // never occur as a hi class.
- case Memory:
- case X87:
- assert(0 && "Invalid classification for hi word.");
-
- case ComplexX87: // Previously handled.
- case NoClass: break;
-
- case Integer:
- ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
- break;
- case SSE:
- ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
- // is passed in the upper half of the last used SSE register.
- //
- // SSEUP should always be preceded by SSE, just widen.
- case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
- // returned together with the previous X87 value in %st0.
- case X87Up:
- // If X87Up is preceded by X87, we don't need to do
- // anything. However, in some cases with unions it may not be
- // preceded by X87. In such situations we follow gcc and pass the
- // extra bits in an SSE reg.
- if (Lo != X87)
- ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
- break;
- }
-
- return getCoerceResult(RetTy, ResType, Context);
-}
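
The switch above maps classes to physical return registers roughly as follows; the register names come from the ABI text quoted in the comments and have not been re-verified against emitted assembly:

    long ret_int();        // Integer        -> %rax
    double ret_sse();      // SSE            -> %xmm0
    long double ret_x87(); // {X87, X87Up}   -> %st0
    struct Pair { long a; double d; };
    Pair ret_pair();       // {Integer, SSE} -> %rax and %xmm0

    int main() { return 0; }
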
-
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
- unsigned &neededInt,
- unsigned &neededSSE) const {
- X86_64ABIInfo::Class Lo, Hi;
- classify(Ty, Context, 0, Lo, Hi);
-
- // Check some invariants.
- // FIXME: Enforce these by construction.
- assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
-
- neededInt = 0;
- neededSSE = 0;
- const llvm::Type *ResType = 0;
- switch (Lo) {
- case NoClass:
- return ABIArgInfo::getIgnore();
-
- // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
- // on the stack.
- case Memory:
-
- // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
- // COMPLEX_X87, it is passed in memory.
- case X87:
- case ComplexX87:
- return getIndirectResult(Ty, Context);
-
- case SSEUp:
- case X87Up:
- assert(0 && "Invalid classification for lo word.");
-
- // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
- // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
- // and %r9 is used.
- case Integer:
- ++neededInt;
- ResType = llvm::Type::Int64Ty;
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
- // available SSE register is used, the registers are taken in the
- // order from %xmm0 to %xmm7.
- case SSE:
- ++neededSSE;
- ResType = llvm::Type::DoubleTy;
- break;
- }
-
- switch (Hi) {
- // Memory was handled previously, ComplexX87 and X87 should
- // never occur as hi classes, and X87Up must be preceded by X87,
- // which is passed in memory.
- case Memory:
- case X87:
- case ComplexX87:
- assert(0 && "Invalid classification for hi word.");
- break;
-
- case NoClass: break;
- case Integer:
- ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
- ++neededInt;
- break;
-
- // X87Up generally doesn't occur here (long double is passed in
- // memory), except in situations involving unions.
- case X87Up:
- case SSE:
- ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
- ++neededSSE;
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
- // eightbyte is passed in the upper half of the last used SSE
- // register.
- case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
- break;
- }
-
- return getCoerceResult(Ty, ResType, Context);
-}
-
-void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
-
- // Keep track of the number of assigned registers.
- unsigned freeIntRegs = 6, freeSSERegs = 8;
-
- // If the return value is indirect, then the hidden argument is consuming one
- // integer register.
- if (FI.getReturnInfo().isIndirect())
- --freeIntRegs;
-
- // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
- // get assigned (in left-to-right order) for passing as follows...
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);
-
- // AMD64-ABI 3.2.3p3: If there are no registers available for any
- // eightbyte of an argument, the whole argument is passed on the
- // stack. If registers have already been assigned for some
- // eightbytes of such an argument, the assignments get reverted.
- if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
- freeIntRegs -= neededInt;
- freeSSERegs -= neededSSE;
- } else {
- it->info = getIndirectResult(it->type, Context);
- }
- }
-}
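
A toy model of the register accounting in computeInfo, using a hypothetical NeededRegs record in place of CGFunctionInfo (the 6/8 free-register counts match the code above):

    #include <cstdio>
    #include <vector>

    struct NeededRegs { unsigned gp, sse; };

    int main() {
      unsigned freeInt = 6, freeSSE = 8;    // %rdi..%r9 and %xmm0..%xmm7
      std::vector<NeededRegs> args(9, NeededRegs{0, 1});  // nine doubles
      for (unsigned i = 0; i < args.size(); ++i) {
        bool inRegs = freeInt >= args[i].gp && freeSSE >= args[i].sse;
        if (inRegs) { freeInt -= args[i].gp; freeSSE -= args[i].sse; }
        // The ninth double no longer fits in %xmm0..%xmm7 and goes on the stack.
        std::printf("arg %u: %s\n", i, inRegs ? "registers" : "stack");
      }
      return 0;
    }
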
-
-static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) {
- llvm::Value *overflow_arg_area_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
- llvm::Value *overflow_arg_area =
- CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
-
- // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
- // byte boundary if alignment needed by type exceeds 8 byte boundary.
- uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
- if (Align > 8) {
- // Note that we follow the ABI & gcc here, even though the type
- // could in theory have an alignment greater than 16. This case
- // shouldn't ever matter in practice.
-
- // overflow_arg_area = (overflow_arg_area + 15) & ~15;
- llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
- overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
- llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
- llvm::Type::Int64Ty);
- llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
- overflow_arg_area =
- CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
- overflow_arg_area->getType(),
- "overflow_arg_area.align");
- }
-
- // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
- const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *Res =
- CGF.Builder.CreateBitCast(overflow_arg_area,
- llvm::PointerType::getUnqual(LTy));
-
- // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
- // l->overflow_arg_area + sizeof(type).
- // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
- // an 8 byte boundary.
-
- uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
- llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
- (SizeInBytes + 7) & ~7);
- overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
- "overflow_arg_area.next");
- CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
-
- // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
- return Res;
-}
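
The two alignment computations above can be checked on host integers; this sketch mirrors the arithmetic the function emits as IR, assuming the same constants as the code:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t overflow_arg_area = 0x1004;
      // Step 7: round up to 16 when the type needs more than 8-byte alignment.
      overflow_arg_area = (overflow_arg_area + 15) & ~uint64_t(15);
      assert(overflow_arg_area == 0x1010);
      // Step 10: advance by sizeof(type) rounded up to an 8-byte boundary.
      uint64_t SizeInBytes = 12;            // e.g. a 12-byte struct
      overflow_arg_area += (SizeInBytes + 7) & ~uint64_t(7);
      assert(overflow_arg_area == 0x1020);
      return 0;
    }
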
-
-llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // Assume that va_list type is correct; should be pointer to LLVM type:
- // struct {
- // i32 gp_offset;
- // i32 fp_offset;
- // i8* overflow_arg_area;
- // i8* reg_save_area;
- // };
- unsigned neededInt, neededSSE;
- ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
- neededInt, neededSSE);
-
- // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
- // in the registers. If not go to step 7.
- if (!neededInt && !neededSSE)
- return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
-
- // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
- // general purpose registers needed to pass type and num_fp to hold
- // the number of floating point registers needed.
-
- // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
- // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
- // l->fp_offset > 304 - num_fp * 16 go to step 7.
- //
- // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
- // register save space.
-
- llvm::Value *InRegs = 0;
- llvm::Value *gp_offset_p = 0, *gp_offset = 0;
- llvm::Value *fp_offset_p = 0, *fp_offset = 0;
- if (neededInt) {
- gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
- gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
- InRegs =
- CGF.Builder.CreateICmpULE(gp_offset,
- llvm::ConstantInt::get(llvm::Type::Int32Ty,
- 48 - neededInt * 8),
- "fits_in_gp");
- }
-
- if (neededSSE) {
- fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
- fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
- llvm::Value *FitsInFP =
- CGF.Builder.CreateICmpULE(fp_offset,
- llvm::ConstantInt::get(llvm::Type::Int32Ty,
- 176 - neededSSE * 16),
- "fits_in_fp");
- InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
- }
-
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
-
- // Emit code to load the value if it was passed in registers.
-
- CGF.EmitBlock(InRegBlock);
-
- // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
- // an offset of l->gp_offset and/or l->fp_offset. This may require
- // copying to a temporary location in case the parameter is passed
- // in different register classes or requires an alignment greater
- // than 8 for general purpose registers and 16 for XMM registers.
- //
- // FIXME: This really results in shameful code when we end up needing to
- // collect arguments from different places; often what should result in a
- // simple assembling of a structure from scattered addresses has many more
- // loads than necessary. Can we clean this up?
- const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *RegAddr =
- CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
- "reg_save_area");
- if (neededInt && neededSSE) {
- // FIXME: Cleanup.
- assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
- const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
- llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
- assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
- const llvm::Type *TyLo = ST->getElementType(0);
- const llvm::Type *TyHi = ST->getElementType(1);
- assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
- "Unexpected ABI info for mixed regs");
- const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
- const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
- llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
- llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
- llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
- llvm::Value *V =
- CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
-
- RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
- } else if (neededInt) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
- } else {
- if (neededSSE == 1) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
- } else {
- assert(neededSSE == 2 && "Invalid number of needed registers!");
- // SSE registers are spaced 16 bytes apart in the register save
- // area, we need to collect the two eightbytes together.
- llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- llvm::Value *RegAddrHi =
- CGF.Builder.CreateGEP(RegAddrLo,
- llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
- const llvm::Type *DblPtrTy =
- llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
- llvm::Type::DoubleTy,
- NULL);
- llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
- }
- }
-
- // AMD64-ABI 3.5.7p5: Step 5. Set:
- // l->gp_offset = l->gp_offset + num_gp * 8
- // l->fp_offset = l->fp_offset + num_fp * 16.
- if (neededInt) {
- llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
- neededInt * 8);
- CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
- gp_offset_p);
- }
- if (neededSSE) {
- llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
- neededSSE * 16);
- CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
- fp_offset_p);
- }
- CGF.EmitBranch(ContBlock);
-
- // Emit code to load the value if it was passed in memory.
-
- CGF.EmitBlock(InMemBlock);
- llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
-
- // Return the appropriate result.
-
- CGF.EmitBlock(ContBlock);
- llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
- "vaarg.addr");
- ResAddr->reserveOperandSpace(2);
- ResAddr->addIncoming(RegAddr, InRegBlock);
- ResAddr->addIncoming(MemAddr, InMemBlock);
-
- return ResAddr;
-}
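
A host-side sketch of the Step 3 comparison emitted above; fitsInRegs is a hypothetical helper mirroring the fits_in_gp/fits_in_fp tests, with the 48- and 176-byte save-area bounds taken from the code:

    #include <cassert>

    static bool fitsInRegs(unsigned gp_offset, unsigned fp_offset,
                           unsigned neededInt, unsigned neededSSE) {
      bool ok = true;
      if (neededInt) ok = ok && gp_offset <= 48 - neededInt * 8;
      if (neededSSE) ok = ok && fp_offset <= 176 - neededSSE * 16;
      return ok;
    }

    int main() {
      assert(fitsInRegs(40, 0, 1, 0));      // exactly one GPR slot left: ok
      assert(!fitsInRegs(48, 0, 1, 0));     // GPR area exhausted
      assert(!fitsInRegs(0, 176, 0, 1));    // FP area exhausted
      return 0;
    }
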
-
-// ABI Info for PIC16
-class PIC16ABIInfo : public ABIInfo {
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context);
- }
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-
-};
-
-ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else {
- return ABIArgInfo::getDirect();
- }
-}
-
-ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context) const {
- return ABIArgInfo::getDirect();
-}
-
-llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- return 0;
-}
-
-class ARMABIInfo : public ABIInfo {
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
-
- virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-};
-
-void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- it->info = classifyArgumentType(it->type, Context);
- }
-}
-
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context) const {
- if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
- return ABIArgInfo::getDirect();
- }
- // FIXME: This is kind of nasty... but there isn't much choice because the ARM
- // backend doesn't support byval.
- // FIXME: This doesn't handle alignment > 64 bits.
- const llvm::Type* ElemTy;
- unsigned SizeRegs;
- if (Context.getTypeAlign(Ty) > 32) {
- ElemTy = llvm::Type::Int64Ty;
- SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
- } else {
- ElemTy = llvm::Type::Int32Ty;
- SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
- }
- std::vector<const llvm::Type*> LLVMFields;
- LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
- const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
- return ABIArgInfo::getCoerce(STy);
-}
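
The SizeRegs rounding above in isolation, as a host-side check (the 96-bit size is illustrative):

    #include <cassert>

    int main() {
      unsigned bits = 96;                    // e.g. struct { int a, b, c; }
      assert((bits + 31) / 32 == 3);         // coerced to { [3 x i32] }
      assert((bits + 63) / 64 == 2);         // with >32-bit alignment: [2 x i64]
      return 0;
    }
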
-
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- // Aggregates <= 4 bytes are returned in r0; other aggregates
- // are returned indirectly.
- uint64_t Size = Context.getTypeSize(RetTy);
- if (Size <= 32)
- return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
- return ABIArgInfo::getIndirect(0);
- } else {
- return ABIArgInfo::getDirect();
- }
-}
-
-llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // FIXME: Need to handle alignment
- const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
-
- CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
- "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
-
- uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
- llvm::Value *NextAddr =
- Builder.CreateGEP(Addr,
- llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
- "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
-}
-
-ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context) const {
- if (RetTy->isVoidType()) {
- return ABIArgInfo::getIgnore();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- return ABIArgInfo::getIndirect(0);
- } else {
- return ABIArgInfo::getDirect();
- }
-}
-
-ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context) const {
- if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
- return ABIArgInfo::getIndirect(0);
- } else {
- return ABIArgInfo::getDirect();
- }
-}
-
-llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- return 0;
-}
-
-const ABIInfo &CodeGenTypes::getABIInfo() const {
- if (TheABIInfo)
- return *TheABIInfo;
-
- // For now we just cache this in the CodeGenTypes and don't bother
- // to free it.
- const char *TargetPrefix = getContext().Target.getTargetPrefix();
- if (strcmp(TargetPrefix, "x86") == 0) {
- bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin");
- switch (getContext().Target.getPointerWidth(0)) {
- case 32:
- return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin));
- case 64:
- return *(TheABIInfo = new X86_64ABIInfo());
- }
- } else if (strcmp(TargetPrefix, "arm") == 0) {
- // FIXME: Support for OABI?
- return *(TheABIInfo = new ARMABIInfo());
- } else if (strcmp(TargetPrefix, "pic16") == 0) {
- return *(TheABIInfo = new PIC16ABIInfo());
- }
-
- return *(TheABIInfo = new DefaultABIInfo);
-}
-
-/***/
-
CGFunctionInfo::CGFunctionInfo(QualType ResTy,
const llvm::SmallVector<QualType, 16> &ArgTys) {
NumArgs = ArgTys.size();
@@ -1637,13 +270,7 @@ static void CreateCoercedStore(llvm::Value *Src,
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
- if (SrcSize >= DstSize) {
- // Generally SrcSize is never greater than DstSize, since this means we are
- // losing bits. However, this can happen in cases where the structure has
- // additional padding, for example due to a user specified alignment.
- //
- // FIXME: Assert that we aren't truncating non-padding bits when we have
- // access to that information.
+ if (SrcSize <= DstSize) {
llvm::Value *Casted =
CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
// FIXME: Use better alignment / avoid requiring aligned store.
@@ -1651,6 +278,13 @@ static void CreateCoercedStore(llvm::Value *Src,
} else {
// Otherwise do coercion through memory. This is stupid, but
// simple.
+
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+ // FIXME: Assert that we aren't truncating non-padding bits when we have
+ // access to that information.
llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
CGF.Builder.CreateStore(Src, Tmp);
llvm::Value *Casted =
@@ -1751,6 +385,11 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs |= llvm::Attribute::ReadOnly;
}
+ if (CompileOpts.DisableRedZone)
+ FuncAttrs |= llvm::Attribute::NoRedZone;
+ if (CompileOpts.NoImplicitFloat)
+ FuncAttrs |= llvm::Attribute::NoImplicitFloat;
+
QualType RetTy = FI.getReturnType();
unsigned Index = 1;
const ABIArgInfo &RetAI = FI.getReturnInfo();
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index bcad77b..29eaaad 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -232,7 +232,9 @@ const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty,
Types[4] = PtrToInt8Ty;
Types[5] = PtrToInt8Ty;
}
- // FIXME: Align this on at least an Align boundary.
+ // FIXME: Align this on at least an Align boundary, assert if we can't.
+ assert((Align <= unsigned(Target.getPointerAlign(0))/8)
+ && "Can't align more thqn pointer yet");
Types[needsCopyDispose*2 + 4] = LTy;
return llvm::StructType::get(Types, false);
}
@@ -244,22 +246,22 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
QualType Ty = D.getType();
bool isByRef = D.hasAttr<BlocksAttr>();
bool needsDispose = false;
+ unsigned Align = 0;
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
// A normal fixed sized variable becomes an alloca in the entry block.
const llvm::Type *LTy = ConvertTypeForMem(Ty);
+ Align = getContext().getDeclAlignInBytes(&D);
if (isByRef)
- LTy = BuildByRefType(Ty, getContext().getDeclAlignInBytes(&D));
+ LTy = BuildByRefType(Ty, Align);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
Alloc->setName(D.getNameAsString().c_str());
if (isByRef)
- Alloc->setAlignment(std::max(getContext().getDeclAlignInBytes(&D),
- unsigned(Target.getPointerAlign(0) / 8)));
- else
- Alloc->setAlignment(getContext().getDeclAlignInBytes(&D));
+ Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
+ Alloc->setAlignment(Align);
DeclPtr = Alloc;
} else {
// Targets that don't support recursion emit locals as globals.
@@ -401,11 +403,12 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
if (flags & BLOCK_HAS_COPY_DISPOSE) {
BlockHasCopyDispose = true;
llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
- Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag),
+ Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
copy_helper);
llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
- Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag),
+ Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
+ Align),
destroy_helper);
}
}
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 8d903d9..4268ae3 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -263,17 +263,21 @@ void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
+ CGF.PushConditionalTempDestruction();
CGF.EmitBlock(LHSBlock);
// Handle the GNU extension for missing LHS.
assert(E->getLHS() && "Must have LHS for aggregate value");
Visit(E->getLHS());
+ CGF.PopConditionalTempDestruction();
CGF.EmitBranch(ContBlock);
+ CGF.PushConditionalTempDestruction();
CGF.EmitBlock(RHSBlock);
Visit(E->getRHS());
+ CGF.PopConditionalTempDestruction();
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 950e9e5..ed18d32 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -1290,8 +1290,10 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getFalse(), *PI);
+ CGF.PushConditionalTempDestruction();
CGF.EmitBlock(RHSBlock);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ CGF.PopConditionalTempDestruction();
// Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
@@ -1335,10 +1337,14 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
PI != PE; ++PI)
PN->addIncoming(llvm::ConstantInt::getTrue(), *PI);
+ CGF.PushConditionalTempDestruction();
+
// Emit the RHS condition as a bool value.
CGF.EmitBlock(RHSBlock);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ CGF.PopConditionalTempDestruction();
+
// Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
@@ -1446,7 +1452,8 @@ VisitConditionalOperator(const ConditionalOperator *E) {
CGF.getContext().BoolTy);
Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
}
-
+
+ CGF.PushConditionalTempDestruction();
CGF.EmitBlock(LHSBlock);
// Handle the GNU extension for missing LHS.
@@ -1456,12 +1463,15 @@ VisitConditionalOperator(const ConditionalOperator *E) {
else // Perform promotions, to handle cases like "short ?: int"
LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+ CGF.PopConditionalTempDestruction();
LHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
+ CGF.PushConditionalTempDestruction();
CGF.EmitBlock(RHSBlock);
Value *RHS = Visit(E->getRHS());
+ CGF.PopConditionalTempDestruction();
RHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 5e7eec9..a70f718 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -1544,14 +1544,8 @@ LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
const ObjCInterfaceDecl *OID,
const ObjCIvarDecl *OIVD) {
- for (ObjCInterfaceDecl::ivar_iterator IVI = OID->ivar_begin(),
- IVE = OID->ivar_end(); IVI != IVE; ++IVI)
- if (OIVD == *IVI)
- return OID;
-
- // Also look in synthesized ivars.
llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- Context.CollectSynthesizedIvars(OID, Ivars);
+ Context.ShallowCollectObjCIvars(OID, Ivars);
for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
if (OIVD == Ivars[k])
return OID;
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 8f1404d..75755ec 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -47,14 +47,8 @@ static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
// ASTContext::getObjCLayout is implemented. This should be fixed to
// get the information from the layout directly.
Index = 0;
- for (ObjCInterfaceDecl::ivar_iterator IVI = OID->ivar_begin(),
- IVE = OID->ivar_end(); IVI != IVE; ++IVI, ++Index)
- if (OIVD == *IVI)
- return OID;
-
- // Also look in synthesized ivars.
llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- Context.CollectSynthesizedIvars(OID, Ivars);
+ Context.ShallowCollectObjCIvars(OID, Ivars);
for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
if (OIVD == Ivars[k])
return OID;
@@ -908,15 +902,6 @@ protected:
unsigned Align,
bool AddToUsed);
- /// GetNamedIvarList - Return the list of ivars in the interface
- /// itself (not including super classes and not including unnamed
- /// bitfields).
- ///
- /// For the non-fragile ABI, this also includes synthesized property
- /// ivars.
- void GetNamedIvarList(const ObjCInterfaceDecl *OID,
- llvm::SmallVector<ObjCIvarDecl*, 16> &Res) const;
-
CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
llvm::Value *Sel,
@@ -2198,10 +2183,13 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
- GetNamedIvarList(OID, OIvars);
+ CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
ObjCIvarDecl *IVD = OIvars[i];
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
Ivar[0] = GetMethodVarName(IVD->getIdentifier());
Ivar[1] = GetMethodVarType(IVD);
Ivar[2] = llvm::ConstantInt::get(ObjCTypes.IntTy,
@@ -4696,25 +4684,6 @@ llvm::Constant * CGObjCNonFragileABIMac::EmitIvarOffsetVar(
/// }
///
-void CGObjCCommonMac::GetNamedIvarList(const ObjCInterfaceDecl *OID,
- llvm::SmallVector<ObjCIvarDecl*, 16> &Res) const {
- for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
- E = OID->ivar_end(); I != E; ++I) {
- // Ignore unnamed bit-fields.
- if (!(*I)->getDeclName())
- continue;
-
- Res.push_back(*I);
- }
-
- // Also save synthesized ivars.
- // FIXME. Why can't we just use the passed-in Res small vector?
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- CGM.getContext().CollectSynthesizedIvars(OID, Ivars);
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
- Res.push_back(Ivars[k]);
-}
-
llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
const ObjCImplementationDecl *ID) {
@@ -4727,10 +4696,13 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
// Collect declared and synthesized ivars in a small vector.
llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
- GetNamedIvarList(OID, OIvars);
+ CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
ObjCIvarDecl *IVD = OIvars[i];
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
ComputeIvarBaseOffset(CGM, ID, IVD));
Ivar[1] = GetMethodVarName(IVD->getIdentifier());
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index d6c46a8..c206a3b 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -5,6 +5,7 @@ add_clang_library(clangCodeGen
CGBlocks.cpp
CGCall.cpp
CGCXX.cpp
+ CGCXXTemp.cpp
CGDebugInfo.cpp
CGDecl.cpp
CGExprAgg.cpp
@@ -21,4 +22,5 @@ add_clang_library(clangCodeGen
CodeGenTypes.cpp
Mangle.cpp
ModuleBuilder.cpp
+ TargetABIInfo.cpp
)
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index c91a052..72c4aa4 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -160,6 +160,20 @@ public:
/// this behavior for branches?
void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
+ /// PushConditionalTempDestruction - Should be called before a conditional
+ /// part of an expression is emitted. For example, before the RHS of the
+ /// expression below is emitted:
+ ///
+ /// b && f(T());
+ ///
+ /// This is used to make sure that any temporaries created in the conditional
+ /// branch are only destroyed if the branch is taken.
+ void PushConditionalTempDestruction();
+
+ /// PopConditionalTempDestruction - Should be called after a conditional
+ /// part of an expression has been emitted.
+ void PopConditionalTempDestruction();
+
private:
CGDebugInfo* DebugInfo;
@@ -263,6 +277,11 @@ private:
};
llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries;
+
+ /// ConditionalTempDestructionStack - Contains the number of live temporaries
+ /// when PushConditionalTempDestruction was called. This is used so that
+ /// we know how many temporaries were created by a certain expression.
+ llvm::SmallVector<size_t, 4> ConditionalTempDestructionStack;
public:
CodeGenFunction(CodeGenModule &cgm);
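
At the source level, the case these hooks handle looks like the following: the temporary on the right of && is constructed, and therefore must be destroyed, only when the left side is true. This is standard C++ behavior, shown as a runnable demo:

    #include <cstdio>

    struct T {
      T()  { std::puts("T()"); }
      ~T() { std::puts("~T()"); }
    };

    static bool f(const T &) { return true; }

    int main() {
      bool b = false;
      (void)(b && f(T()));   // prints nothing: no temporary was created
      b = true;
      (void)(b && f(T()));   // prints "T()" then "~T()"
      return 0;
    }
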
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index b69301e..5c12c81 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -298,7 +298,7 @@ void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
GV->setLinkage(llvm::Function::InternalLinkage);
} else if (D->hasAttr<DLLExportAttr>()) {
GV->setLinkage(llvm::Function::DLLExportLinkage);
- } else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>()) {
+ } else if (D->hasAttr<WeakAttr>()) {
GV->setLinkage(llvm::Function::WeakAnyLinkage);
} else if (Linkage == GVA_C99Inline) {
// In C99 mode, 'inline' functions are guaranteed to have a strong
@@ -853,7 +853,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setLinkage(llvm::Function::DLLImportLinkage);
else if (D->hasAttr<DLLExportAttr>())
GV->setLinkage(llvm::Function::DLLExportLinkage);
- else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>())
+ else if (D->hasAttr<WeakAttr>())
GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
else if (!CompileOpts.NoCommon &&
(!D->hasExternalStorage() && !D->getInit()))
@@ -891,8 +891,9 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
UI != E; ) {
// TODO: Do invokes ever occur in C code? If so, we should handle them too.
+ unsigned OpNo = UI.getOperandNo();
llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*UI++);
- if (!CI) continue;
+ if (!CI || OpNo != 0) continue;
// If the return types don't match exactly, and if the call isn't dead, then
// we can't transform this call.
diff --git a/lib/CodeGen/TargetABIInfo.cpp b/lib/CodeGen/TargetABIInfo.cpp
new file mode 100644
index 0000000..573ffed
--- /dev/null
+++ b/lib/CodeGen/TargetABIInfo.cpp
@@ -0,0 +1,1379 @@
+//===---- TargetABIInfo.cpp - Encapsulate target ABI details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/Type.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+ABIInfo::~ABIInfo() {}
+
+void ABIArgInfo::dump() const {
+ fprintf(stderr, "(ABIArgInfo Kind=");
+ switch (TheKind) {
+ case Direct:
+ fprintf(stderr, "Direct");
+ break;
+ case Ignore:
+ fprintf(stderr, "Ignore");
+ break;
+ case Coerce:
+ fprintf(stderr, "Coerce Type=");
+ getCoerceToType()->print(llvm::errs());
+ break;
+ case Indirect:
+ fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
+ break;
+ case Expand:
+ fprintf(stderr, "Expand");
+ break;
+ }
+ fprintf(stderr, ")\n");
+}
+
+static bool isEmptyRecord(ASTContext &Context, QualType T);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it
+/// is an unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+ // Constant arrays of empty records count as empty, strip them off.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+ FT = AT->getElementType();
+
+ return isEmptyRecord(Context, FT);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+static bool isEmptyRecord(ASTContext &Context, QualType T) {
+ const RecordType *RT = T->getAsRecordType();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i)
+ if (!isEmptyField(Context, *i))
+ return false;
+ return true;
+}
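
Shapes the two predicates above should classify as follows; these are expected results read off from the code, not output from clang:

    struct Empty {};                      // isEmptyRecord: true
    struct Bits { int : 7; };             // only an unnamed bit-field: empty
    struct Arr  { Empty e[4]; };          // array of empty records: empty
    struct Real { Empty e; int x; };      // the named int makes it non-empty

    int main() { return 0; }
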
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The type of the single non-empty field, if
+/// it exists.
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAsStructureType();
+ if (!RT)
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return 0;
+
+ const Type *Found = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return 0;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ return Found;
+}
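
Expected behavior of isSingleElementStruct on a few shapes, assumed from the rules above rather than produced by running the function:

    struct Empty2 {};
    struct S  { Empty2 e; double d; };    // single element: double
    struct W  { S inner; };               // unwraps recursively: double
    struct A  { double d[1]; };           // one-element array: double
    struct No { double a, b; };           // two non-empty fields: not single

    int main() { return 0; }
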
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
+ ASTContext &Context) {
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+ // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
+ // how to expand them yet, and the predicate for telling if a bitfield still
+ // counts as "basic" is more complicated than what we were doing previously.
+ if (FD->isBitField())
+ return false;
+ }
+
+ return true;
+}
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ ASTContext &Context;
+ bool IsDarwin;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+
+public:
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ X86_32ABIInfo(ASTContext &Context, bool d)
+ : ABIInfo(), Context(Context), IsDarwin(d) {}
+};
+}
+
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Type must be register sized.
+ if (!isRegisterSize(Size))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128- bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, or complex type, it is ok.
+ if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAsRecordType();
+ if (!RT) return false;
+
+ // Structure types are returned in a register if all fields would be
+ // returned in a register.
+ for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
+ e = RT->getDecl()->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ return false;
+ }
+
+ return true;
+}
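
The register-size gate used above, restated with a couple of spot checks (sizes are in bits):

    #include <cassert>

    static bool isRegisterSize(unsigned Size) {
      return Size == 8 || Size == 16 || Size == 32 || Size == 64;
    }

    int main() {
      assert(isRegisterSize(32));     // e.g. struct { short a, b; }
      assert(!isRegisterSize(96));    // a 12-byte struct is returned indirectly
      return 0;
    }
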
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (const VectorType *VT = RetTy->getAsVectorType()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwin) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
+ 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return ABIArgInfo::getDirect();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = RetTy->getAsStructureType())
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+
+ // Outside of Darwin, structs and unions are always indirect.
+ if (!IsDarwin && !RetTy->isAnyComplexType())
+ return ABIArgInfo::getIndirect(0);
+
+ // Classify "single element" structs as their element type.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
+ if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
+ if (BT->isIntegerType()) {
+ // We need to use the size of the structure, padding
+ // bit-fields can adjust that to be larger than the single
+ // element type.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
+ } else if (BT->getKind() == BuiltinType::Float) {
+ assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ "Unexpect single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
+ } else if (BT->getKind() == BuiltinType::Double) {
+ assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ "Unexpect single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
+ }
+ } else if (SeltTy->isPointerType()) {
+ // FIXME: It would be really nice if this could come out as the proper
+ // pointer type.
+ llvm::Type *PtrTy =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ return ABIArgInfo::getCoerce(PtrTy);
+ } else if (SeltTy->isVectorType()) {
+ // 64- and 128-bit vectors are never returned in a
+ // register when inside a structure.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size == 64 || Size == 128)
+ return ABIArgInfo::getIndirect(0);
+
+ return classifyReturnType(QualType(SeltTy, 0), Context);
+ }
+ }
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ // FIXME: Set alignment on indirect arguments.
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = Ty->getAsStructureType())
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+
+ // Ignore empty structs.
+ uint64_t Size = Context.getTypeSize(Ty);
+ if (Ty->isStructureType() && Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ // Expand structs with size <= 128-bits which consist only of
+ // basic types (int, long long, float, double, xxx*). This is
+ // non-recursive and does not ignore empty fields.
+ if (const RecordType *RT = Ty->getAsStructureType()) {
+ if (Context.getTypeSize(Ty) <= 4*32 &&
+ areAllFields32Or64BitBasicType(RT->getDecl(), Context))
+ return ABIArgInfo::getExpand();
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
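
Argument shapes and the treatment the rules above should give them on x86-32; these are expected results, not verified against clang output:

    struct E4 {};                            // empty struct (size 0 in C): Ignore
    struct B { int a; float f; double d; };  // 16 bytes, all basic types: Expand
    // struct F { int n; int data[]; };      // flexible array member: Indirect (C only)
    struct Big { double d[4]; };             // 32 bytes, over the 4*32-bit cap: Indirect

    int main() { return 0; }
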
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
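
The ap.next computation above, modeled on the host: the cursor advances by the argument size rounded up to the 4-byte i386 stack slot (values are illustrative):

    #include <cassert>

    int main() {
      unsigned long ap = 0x100;
      unsigned sizeInBytes = 6;                       // e.g. struct { short s[3]; }
      unsigned long next = ap + ((sizeInBytes + 3) & ~3ul);
      assert(next == 0x108);
      return 0;
    }
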
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ Class merge(Class Accum, Class Field) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+ /// containing object. Some parameters are classified differently
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
+ void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const;
+
+ /// getCoerceResult - Given a source type \arg Ty and an LLVM type
+ /// to coerce to, choose the best way to pass Ty in the same place
+ /// that \arg CoerceTo would be passed, but while keeping the
+ /// emitted code as simple as possible.
+ ///
+ /// FIXME: Note, this should be cleaned up to just take an enumeration of all
+ /// the ways we might want to pass things, instead of constructing an LLVM
+ /// type. This makes this code more explicit, and it makes it clearer that we
+ /// are also doing this for correctness in the case of passing scalar types.
+ ABIArgInfo getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ unsigned &neededInt,
+ unsigned &neededSSE) const;
+
+public:
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
+ Class Field) const {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ else if (Field == Memory)
+ return Memory;
+ else if (Accum == NoClass)
+ return Field;
+ else if (Accum == Integer || Field == Integer)
+ return Integer;
+ else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ else
+ return SSE;
+}
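
A standalone restatement of the merge table for quick sanity checks; kMerge is a hypothetical helper, with the enumerators in the same order as X86_64ABIInfo::Class:

    #include <cassert>

    enum Class { Integer, SSE, SSEUp, X87, X87Up, ComplexX87, NoClass, Memory };

    static Class kMerge(Class Accum, Class Field) {
      if (Accum == Field || Field == NoClass) return Accum;      // (a), (b)
      if (Field == Memory) return Memory;                        // (c)
      if (Accum == NoClass) return Field;                        // (b)
      if (Accum == Integer || Field == Integer) return Integer;  // (d)
      if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
          Accum == X87 || Accum == X87Up)
        return Memory;                                           // (e)
      return SSE;                                                // (f)
    }

    int main() {
      assert(kMerge(NoClass, Integer) == Integer);
      assert(kMerge(SSE, Integer) == Integer);
      assert(kMerge(SSE, X87) == Memory);
      assert(kMerge(SSE, SSEUp) == SSE);
      return 0;
    }
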
+
+void X86_64ABIInfo::classify(QualType Ty,
+ ASTContext &Context,
+ uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ Lo = X87;
+ Hi = X87Up;
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ } else if (const EnumType *ET = Ty->getAsEnumType()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
+ } else if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ } else if (const VectorType *VT = Ty->getAsVectorType()) {
+ uint64_t Size = Context.getTypeSize(VT);
+ if (Size == 32) {
+ // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+ // float> as integer.
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+ if (EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128) {
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ } else if (const ComplexType *CT = Ty->getAsComplexType()) {
+ QualType ET = Context.getCanonicalType(CT->getElementType());
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ if (ET->isIntegralType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == Context.FloatTy)
+ Current = SSE;
+ else if (ET == Context.DoubleTy)
+ Lo = Hi = SSE;
+ else if (ET == Context.LongDoubleTy)
+ Current = ComplexX87;
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = Context.getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+    // Do post merger cleanup (see below). The only case we worry about is Memory.
+ if (Hi == Memory)
+ Lo = Memory;
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ } else if (const RecordType *RT = Ty->getAsRecordType()) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
+ Lo = Memory;
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+ FieldLo = FieldHi = NoClass;
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is MEMORY, the whole argument is
+ // passed in memory.
+ //
+  //     (b) If SSEUP is not preceded by SSE, it is converted to SSE.
+
+ // The first of these conditions is guaranteed by how we implement
+ // the merge (just bail).
+ //
+ // The second condition occurs in the case of unions; for example
+ // union { _Complex double; unsigned; }.
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+ }
+}
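A few worked classifications for the code above (hypothetical example types;
offsets are in bits):

    struct S1 { long a; double b; };  /* Lo = Integer, Hi = SSE      */
    struct S2 { char buf[16]; };      /* Lo = Integer, Hi = Integer  */
    struct S3 { double a, b, c; };    /* size > 128 bits => MEMORY   */
    /* _Complex double              => Lo = SSE, Hi = SSE            */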
+
+ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const {
+ if (CoerceTo == llvm::Type::Int64Ty) {
+ // Integer and pointer types will end up in a general purpose
+ // register.
+ if (Ty->isIntegralType() || Ty->isPointerType())
+ return ABIArgInfo::getDirect();
+
+ } else if (CoerceTo == llvm::Type::DoubleTy) {
+ // FIXME: It would probably be better to make CGFunctionInfo only map using
+    // canonical types than to canonicalize here.
+ QualType CTy = Context.getCanonicalType(Ty);
+
+ // Float and double end up in a single SSE reg.
+ if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
+ return ABIArgInfo::getDirect();
+
+ }
+
+ return ABIArgInfo::getCoerce(CoerceTo);
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ ASTContext &Context) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty))
+ return ABIArgInfo::getDirect();
+
+ // FIXME: Set alignment correctly.
+ return ABIArgInfo::getIndirect(0);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectResult(RetTy, Context);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = llvm::Type::Int64Ty; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = llvm::Type::DoubleTy; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::X86_FP80Ty; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
+ llvm::Type::X86_FP80Ty,
+ NULL);
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ assert(0 && "Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass: break;
+
+ case Integer:
+ ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
+ break;
+ case SSE:
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the upper half of the last used SSE register.
+ //
+    // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+    // If X87Up is preceded by X87, we don't need to do
+    // anything. However, in some cases with unions it may not be
+    // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87)
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ break;
+ }
+
+ return getCoerceResult(RetTy, ResType, Context);
+}
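A sketch of the return lowerings the two switches above produce
(illustrative, not exhaustive):

    // long                     -> Integer; passed direct     (%rax)
    // struct { long; double; } -> coerce to { i64, double }  (%rax, %xmm0)
    // long double              -> coerce to x86_fp80         (%st0)
    // struct { char c[32]; }   -> MEMORY; hidden sret pointer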
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
+ unsigned &neededInt,
+ unsigned &neededSSE) const {
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ return getIndirectResult(Ty, Context);
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+ ResType = llvm::Type::Int64Ty;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE:
+ ++neededSSE;
+ ResType = llvm::Type::DoubleTy;
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+  // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ assert(0 && "Invalid classification for hi word.");
+ break;
+
+ case NoClass: break;
+ case Integer:
+ ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
+ ++neededInt;
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
+ break;
+ }
+
+ return getCoerceResult(Ty, ResType, Context);
+}
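Illustrative outcomes for hypothetical argument types:

    // int                      -> neededInt=1, neededSSE=0, passed direct
    // double                   -> neededInt=0, neededSSE=1, passed direct
    // struct { long; double; } -> neededInt=1, neededSSE=1, { i64, double }
    // struct { long double; }  -> lo class X87, so passed indirectly in memory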
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type, Context);
+ }
+ }
+}
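For example (hypothetical signature), once the six integer registers are
exhausted a two-eightbyte aggregate is demoted to the stack as a whole
rather than being split between registers and memory:

    struct P { long x, y; };                 /* needs 2 GPRs            */
    void f(long a, long b, long c, long d,
           long e, long g, struct P p);      /* 0 GPRs left => indirect */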
+
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) {
+ llvm::Value *overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 8) {
+ // Note that we follow the ABI & gcc here, even though the type
+ // could in theory have an alignment greater than 16. This case
+ // shouldn't ever matter in practice.
+
+ // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+ llvm::Type::Int64Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
+ overflow_arg_area =
+ CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ overflow_arg_area->getType(),
+ "overflow_arg_area.align");
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Res;
+}
+
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+ ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
+ neededInt, neededSSE);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+  // register save space.
+
+ llvm::Value *InRegs = 0;
+ llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+ llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs =
+ CGF.Builder.CreateICmpULE(gp_offset,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 48 - neededInt * 8),
+ "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ CGF.Builder.CreateICmpULE(fp_offset,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 176 - neededSSE * 16),
+ "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+ "reg_save_area");
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
+ const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ const llvm::Type *TyLo = ST->getElementType(0);
+ const llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
+ "Unexpected ABI info for mixed regs");
+ const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
+ llvm::Value *V =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
+ } else if (neededInt) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+      // area; we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi =
+ CGF.Builder.CreateGEP(RegAddrLo,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
+ const llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
+ const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
+ llvm::Type::DoubleTy,
+ NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ }
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
+ "vaarg.addr");
+ ResAddr->reserveOperandSpace(2);
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+
+ return ResAddr;
+}
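In source terms, the control flow above lowers a va_arg such as the
following (illustrative C, not from the commit):

    #include <stdarg.h>
    long sum_rest(int n, ...) {
      va_list ap;
      va_start(ap, n);
      long total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, long); /* gp_offset check, then reg_save_area */
      va_end(ap);
      return total;
    }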
+
+// ABI Info for PIC16
+class PIC16ABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+};
+
+ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ return ABIArgInfo::getDirect();
+}
+
+llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+class ARMABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ it->info = classifyArgumentType(it->type, Context);
+ }
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getDirect();
+ }
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
+ // FIXME: This doesn't handle alignment > 64 bits.
+ const llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ if (Context.getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::Int64Ty;
+ SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::Int32Ty;
+ SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
+ }
+ std::vector<const llvm::Type*> LLVMFields;
+ LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
+ const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
+ return ABIArgInfo::getCoerce(STy);
+}
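A sketch of the coercion above for a hypothetical argument:

    /* struct { int a, b, c; }  -- size 96 bits, align 32
       => ElemTy = i32, SizeRegs = 3, coerced to packed { [3 x i32] } */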
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size <= 32)
+ return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+const ABIInfo &CodeGenTypes::getABIInfo() const {
+ if (TheABIInfo)
+ return *TheABIInfo;
+
+ // For now we just cache this in the CodeGenTypes and don't bother
+ // to free it.
+ const char *TargetPrefix = getContext().Target.getTargetPrefix();
+ if (strcmp(TargetPrefix, "x86") == 0) {
+ bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin");
+ switch (getContext().Target.getPointerWidth(0)) {
+ case 32:
+ return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin));
+ case 64:
+ return *(TheABIInfo = new X86_64ABIInfo());
+ }
+ } else if (strcmp(TargetPrefix, "arm") == 0) {
+ // FIXME: Support for OABI?
+ return *(TheABIInfo = new ARMABIInfo());
+ } else if (strcmp(TargetPrefix, "pic16") == 0) {
+ return *(TheABIInfo = new PIC16ABIInfo());
+ }
+
+ return *(TheABIInfo = new DefaultABIInfo);
+}
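Illustrative dispatch outcomes (assumed target triples):

    // x86_64-unknown-freebsd  -> prefix "x86", pointer width 64 -> X86_64ABIInfo
    // i386-apple-darwin9      -> prefix "x86", width 32, Darwin -> X86_32ABIInfo
    // arm-apple-darwin9       -> prefix "arm"                   -> ARMABIInfo
    // anything else           -> DefaultABIInfo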
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index eca6413..6b082c8 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -329,7 +329,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_msoft_float,
options::OPT_mno_soft_float,
false))
- CmdArgs.push_back("--soft-float");
+ CmdArgs.push_back("--no-implicit-float");
// FIXME: Handle -mtune=.
(void) Args.hasArg(options::OPT_mtune_EQ);
@@ -505,6 +505,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fblocks=0");
}
+ // -fsigned-char/-funsigned-char default varies depending on platform; only
+ // pass if specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fsigned_char,
+ options::OPT_funsigned_char)) {
+ if (A->getOption().matches(options::OPT_fsigned_char))
+ CmdArgs.push_back("-fsigned-char");
+ else
+ CmdArgs.push_back("-fsigned-char=0");
+ }
+
  // -fno-pascal-strings is default, only pass non-default. If the
  // tool chain happened to translate to -mpascal-strings, we want to
  // back translate here.
@@ -539,6 +549,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_fdiagnostics_show_option,
options::OPT_fno_diagnostics_show_option))
CmdArgs.push_back("-fdiagnostics-show-option");
+ if (!Args.hasFlag(options::OPT_fcolor_diagnostics,
+ options::OPT_fno_color_diagnostics))
+ CmdArgs.push_back("-fno-color-diagnostics");
// -fdollars-in-identifiers default varies depending on platform and
// language; only pass if specified.
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index e3a45d4..01729fa 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -23,8 +23,8 @@ namespace clang {
// Append a #define line to Buf for Macro. Macro should be of the form XXX,
// in which case we emit "#define XXX 1" or "XXX=Y z W" in which case we emit
// "#define XXX Y z W". To get a #define with no value, use "XXX=".
-static void DefineBuiltinMacro(std::vector<char> &Buf, const char *Macro,
- const char *Command = "#define ") {
+static void DefineBuiltinMacro(std::vector<char> &Buf, const char *Macro) {
+ const char *Command = "#define ";
Buf.insert(Buf.end(), Command, Command+strlen(Command));
if (const char *Equal = strchr(Macro, '=')) {
// Turn the = into ' '.
@@ -367,7 +367,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
sprintf(MacroBuf, "__POINTER_WIDTH__=%d", (int)TI.getPointerWidth(0));
DefineBuiltinMacro(Buf, MacroBuf);
- if (!TI.isCharSigned())
+ if (!LangOpts.CharIsSigned)
DefineBuiltinMacro(Buf, "__CHAR_UNSIGNED__");
// Define fixed-sized integer types for stdint.h
@@ -403,11 +403,6 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// command line options or something.
DefineBuiltinMacro(Buf, "__FINITE_MATH_ONLY__=0");
- if (LangOpts.Static)
- DefineBuiltinMacro(Buf, "__STATIC__=1");
- else
- DefineBuiltinMacro(Buf, "__DYNAMIC__=1");
-
if (LangOpts.GNUInline)
DefineBuiltinMacro(Buf, "__GNUC_GNU_INLINE__=1");
else
diff --git a/lib/Frontend/PCHReader.cpp b/lib/Frontend/PCHReader.cpp
index 63e4337..87fc839 100644
--- a/lib/Frontend/PCHReader.cpp
+++ b/lib/Frontend/PCHReader.cpp
@@ -1521,6 +1521,7 @@ bool PCHReader::ParseLanguageOptions(
PARSE_LANGOPT_IMPORTANT(GNUInline, diag::warn_pch_gnu_inline);
PARSE_LANGOPT_IMPORTANT(NoInline, diag::warn_pch_no_inline);
PARSE_LANGOPT_IMPORTANT(AccessControl, diag::warn_pch_access_control);
+ PARSE_LANGOPT_IMPORTANT(CharIsSigned, diag::warn_pch_char_signed);
if ((LangOpts.getGCMode() != 0) != (Record[Idx] != 0)) {
Diag(diag::warn_pch_gc_mode)
<< (unsigned)Record[Idx] << LangOpts.getGCMode();
diff --git a/lib/Frontend/PCHWriter.cpp b/lib/Frontend/PCHWriter.cpp
index 80e863b..765fecb 100644
--- a/lib/Frontend/PCHWriter.cpp
+++ b/lib/Frontend/PCHWriter.cpp
@@ -556,6 +556,8 @@ void PCHWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
Record.push_back(LangOpts.NoInline); // Should __NO_INLINE__ be defined.
Record.push_back(LangOpts.AccessControl); // Whether C++ access control should
// be enabled.
+ Record.push_back(LangOpts.CharIsSigned); // Whether char is a signed or
+ // unsigned type
Record.push_back(LangOpts.getGCMode());
Record.push_back(LangOpts.getVisibilityMode());
Record.push_back(LangOpts.InstantiationDepth);
diff --git a/lib/Frontend/PrintParserCallbacks.cpp b/lib/Frontend/PrintParserCallbacks.cpp
index f02d5d4..b9fe068 100644
--- a/lib/Frontend/PrintParserCallbacks.cpp
+++ b/lib/Frontend/PrintParserCallbacks.cpp
@@ -220,6 +220,7 @@ namespace {
}
virtual DeclPtrTy ActOnIvar(Scope *S, SourceLocation DeclStart,
+ DeclPtrTy IntfDecl,
Declarator &D, ExprTy *BitfieldWidth,
tok::ObjCKeywordKind visibility) {
Out << __FUNCTION__ << "\n";
diff --git a/lib/Frontend/TextDiagnosticPrinter.cpp b/lib/Frontend/TextDiagnosticPrinter.cpp
index b1c0533..6699c65 100644
--- a/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -20,6 +20,20 @@
#include <algorithm>
using namespace clang;
+static const enum llvm::raw_ostream::Colors noteColor =
+ llvm::raw_ostream::BLACK;
+static const enum llvm::raw_ostream::Colors fixitColor =
+ llvm::raw_ostream::GREEN;
+static const enum llvm::raw_ostream::Colors caretColor =
+ llvm::raw_ostream::GREEN;
+static const enum llvm::raw_ostream::Colors warningColor =
+ llvm::raw_ostream::MAGENTA;
+static const enum llvm::raw_ostream::Colors errorColor = llvm::raw_ostream::RED;
+static const enum llvm::raw_ostream::Colors fatalColor = llvm::raw_ostream::RED;
+// used for changing only the bold attribute
+static const enum llvm::raw_ostream::Colors savedColor =
+ llvm::raw_ostream::SAVEDCOLOR;
+
/// \brief Number of spaces to indent when word-wrapping.
const unsigned WordWrapIndentation = 6;
@@ -396,12 +410,22 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
// Emit what we have computed.
OS << SourceLine << '\n';
+
+ if (UseColors)
+ OS.changeColor(caretColor, true);
OS << CaretLine << '\n';
+ if (UseColors)
+ OS.resetColor();
if (!FixItInsertionLine.empty()) {
+ if (UseColors)
+ // Print fixit line in color
+ OS.changeColor(fixitColor, false);
if (PrintRangeInfo)
OS << ' ';
OS << FixItInsertionLine << '\n';
+ if (UseColors)
+ OS.resetColor();
}
}
@@ -598,6 +622,8 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
// Compute the column number.
if (ShowLocation) {
+ if (UseColors)
+ OS.changeColor(savedColor, true);
OS << PLoc.getFilename() << ':' << LineNo << ':';
if (ShowColumn)
if (unsigned ColNo = PLoc.getColumn())
@@ -638,6 +664,19 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
OS << ':';
}
OS << ' ';
+ if (UseColors)
+ OS.resetColor();
+ }
+ }
+
+ if (UseColors) {
+ // Print diagnostic category in bold and color
+ switch (Level) {
+ case Diagnostic::Ignored: assert(0 && "Invalid diagnostic type");
+ case Diagnostic::Note: OS.changeColor(noteColor, true); break;
+ case Diagnostic::Warning: OS.changeColor(warningColor, true); break;
+ case Diagnostic::Error: OS.changeColor(errorColor, true); break;
+ case Diagnostic::Fatal: OS.changeColor(fatalColor, true); break;
}
}
@@ -648,7 +687,10 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
case Diagnostic::Error: OS << "error: "; break;
case Diagnostic::Fatal: OS << "fatal error: "; break;
}
-
+
+ if (UseColors)
+ OS.resetColor();
+
llvm::SmallString<100> OutStr;
Info.FormatDiagnostic(OutStr);
@@ -659,6 +701,16 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
OutStr += ']';
}
+ if (UseColors) {
+ // Print warnings, errors and fatal errors in bold, no color
+ switch (Level) {
+ case Diagnostic::Warning: OS.changeColor(savedColor, true); break;
+ case Diagnostic::Error: OS.changeColor(savedColor, true); break;
+ case Diagnostic::Fatal: OS.changeColor(savedColor, true); break;
+      default: break; // don't bold notes
+ }
+ }
+
if (MessageLength) {
// We will be word-wrapping the error message, so compute the
// column number where we currently are (after printing the
@@ -669,6 +721,8 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
OS.write(OutStr.begin(), OutStr.size());
}
OS << '\n';
+ if (UseColors)
+ OS.resetColor();
// If caret diagnostics are enabled and we have location, we want to
// emit the caret. However, we only do this if the location moved
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index c96000a..23a61a0 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -40,7 +40,8 @@ typedef char __v16qi __attribute__((__vector_size__(16)));
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_add_sd(__m128d a, __m128d b)
{
- return __builtin_ia32_addsd(a, b);
+ a[0] += b[0];
+ return a;
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
@@ -52,7 +53,8 @@ _mm_add_pd(__m128d a, __m128d b)
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sub_sd(__m128d a, __m128d b)
{
- return __builtin_ia32_subsd(a, b);
+ a[0] -= b[0];
+ return a;
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
@@ -64,7 +66,8 @@ _mm_sub_pd(__m128d a, __m128d b)
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_mul_sd(__m128d a, __m128d b)
{
- return __builtin_ia32_mulsd(a, b);
+ a[0] *= b[0];
+ return a;
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
@@ -76,7 +79,8 @@ _mm_mul_pd(__m128d a, __m128d b)
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_div_sd(__m128d a, __m128d b)
{
- return __builtin_ia32_divsd(a, b);
+ a[0] /= b[0];
+ return a;
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
@@ -125,25 +129,25 @@ _mm_max_pd(__m128d a, __m128d b)
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_and_pd(__m128d a, __m128d b)
{
- return __builtin_ia32_andpd(a, b);
+ return (__m128d)((__v4si)a & (__v4si)b);
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_andnot_pd(__m128d a, __m128d b)
{
- return __builtin_ia32_andnpd(a, b);
+ return (__m128d)(~(__v4si)a & (__v4si)b);
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_or_pd(__m128d a, __m128d b)
{
- return __builtin_ia32_orpd(a, b);
+ return (__m128d)((__v4si)a | (__v4si)b);
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_xor_pd(__m128d a, __m128d b)
{
- return __builtin_ia32_xorpd(a, b);
+ return (__m128d)((__v4si)a ^ (__v4si)b);
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
@@ -383,19 +387,22 @@ _mm_cvtsd_si32(__m128d a)
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_ss(__m128 a, __m128d b)
{
- return __builtin_ia32_cvtsd2ss(a, b);
+ a[0] = b[0];
+ return a;
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_sd(__m128d a, int b)
{
- return __builtin_ia32_cvtsi2sd(a, b);
+ a[0] = b;
+ return a;
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_sd(__m128d a, __m128 b)
{
- return __builtin_ia32_cvtss2sd(a, b);
+ a[0] = b[0];
+ return a;
}
static inline __m128i __attribute__((__always_inline__, __nodebug__))
@@ -407,7 +414,7 @@ _mm_cvttpd_epi32(__m128d a)
static inline int __attribute__((__always_inline__, __nodebug__))
_mm_cvttsd_si32(__m128d a)
{
- return __builtin_ia32_cvttsd2si(a);
+ return a[0];
}
static inline __m64 __attribute__((__always_inline__, __nodebug__))
@@ -669,7 +676,7 @@ _mm_mulhi_epu16(__m128i a, __m128i b)
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi16(__m128i a, __m128i b)
{
- return (__m128i)__builtin_ia32_pmullw128((__v8hi)a, (__v8hi)b);
+ return (__m128i)((__v8hi)a * (__v8hi)b);
}
static inline __m64 __attribute__((__always_inline__, __nodebug__))
@@ -747,25 +754,25 @@ _mm_subs_epu16(__m128i a, __m128i b)
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_and_si128(__m128i a, __m128i b)
{
- return __builtin_ia32_pand128(a, b);
+ return a & b;
}
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_andnot_si128(__m128i a, __m128i b)
{
- return __builtin_ia32_pandn128(a, b);
+ return ~a & b;
}
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_or_si128(__m128i a, __m128i b)
{
- return __builtin_ia32_por128(a, b);
+ return a | b;
}
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_xor_si128(__m128i a, __m128i b)
{
- return __builtin_ia32_pxor128(a, b);
+ return a ^ b;
}
static inline __m128i __attribute__((__always_inline__, __nodebug__))
@@ -934,7 +941,8 @@ _mm_cmplt_epi32(__m128i a, __m128i b)
static inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_sd(__m128d a, long long b)
{
- return __builtin_ia32_cvtsi642sd(a, b);
+ a[0] = b;
+ return a;
}
static inline long long __attribute__((__always_inline__, __nodebug__))
@@ -946,7 +954,7 @@ _mm_cvtsd_si64(__m128d a)
static inline long long __attribute__((__always_inline__, __nodebug__))
_mm_cvttsd_si64(__m128d a)
{
- return __builtin_ia32_cvttsd2si64(a);
+ return a[0];
}
#endif
@@ -1181,7 +1189,9 @@ _mm_extract_epi16(__m128i a, int imm)
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_insert_epi16(__m128i a, int b, int imm)
{
- return (__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)a, b, imm);
+ __v8hi c = (__v8hi)a;
+ c[imm & 7] = b;
+ return (__m128i)c;
}
static inline int __attribute__((__always_inline__, __nodebug__))
@@ -1190,9 +1200,20 @@ _mm_movemask_epi8(__m128i a)
return __builtin_ia32_pmovmskb128((__v16qi)a);
}
-#define _mm_shuffle_epi32(a, imm) ((__m128i)__builtin_ia32_pshufd((__v4si)(a), (imm)))
-#define _mm_shufflehi_epi16(a, imm) ((__m128i)__builtin_ia32_pshufhw((__v8hi)(a), (imm)))
-#define _mm_shufflelo_epi16(a, imm) ((__m128i)__builtin_ia32_pshuflw((__v8hi)(a), (imm)))
+#define _mm_shuffle_epi32(a, imm) \
+ ((__m128i)__builtin_shufflevector((__v4si)(a), (__v4si) {0}, \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6))
+#define _mm_shufflelo_epi16(a, imm) \
+ ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) {0}, \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7))
+#define _mm_shufflehi_epi16(a, imm) \
+ ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) {0}, 0, 1, 2, 3, \
+                                    4 + ((imm) & 0x3), 4 + (((imm) & 0xc) >> 2), \
+                                    4 + (((imm) & 0x30) >> 4), \
+                                    4 + (((imm) & 0xc0) >> 6)))
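A usage sketch for the rewritten macros (the immediate packs four 2-bit lane
selectors, low bits first):

    /* __m128i v = _mm_set_epi32(3, 2, 1, 0);
       __m128i r = _mm_shuffle_epi32(v, 0x1B);  reverses the four lanes */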
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi8(__m128i a, __m128i b)
@@ -1257,7 +1278,7 @@ _mm_movpi64_pi64(__m64 a)
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_move_epi64(__m128i a)
{
- return (__m128i){ a[0], 0 };
+ return __builtin_shufflevector(a, (__m128i){ 0 }, 0, 2);
}
static inline __m128d __attribute__((__always_inline__, __nodebug__))
@@ -1278,7 +1299,8 @@ _mm_movemask_pd(__m128d a)
return __builtin_ia32_movmskpd(a);
}
-#define _mm_shuffle_pd(a, b, i) (__builtin_ia32_shufpd((a), (b), (i)))
+#define _mm_shuffle_pd(a, b, i) (__builtin_shufflevector((a), (b), (i) & 1, \
+ (((i) & 2) >> 1) + 2))
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_castpd_ps(__m128d in)
diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h
index 339d212..8ea3c47 100644
--- a/lib/Headers/mmintrin.h
+++ b/lib/Headers/mmintrin.h
@@ -415,13 +415,13 @@ _mm_set1_pi32(int __i)
static inline __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi16(short __s)
{
- return (__m64)(__v4hi){ __s };
+ return (__m64)(__v4hi){ __s, __s, __s, __s };
}
static inline __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi8(char __b)
{
- return (__m64)(__v8qi){ __b };
+ return (__m64)(__v8qi){ __b, __b, __b, __b, __b, __b, __b, __b };
}
static inline __m64 __attribute__((__always_inline__, __nodebug__))
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index e9715f1..374a27e 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -114,19 +114,19 @@ _mm_hsub_epi16(__m128i a, __m128i b)
static inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsub_epi32(__m128i a, __m128i b)
{
- return (__m128i)__builtin_ia32_psubd128((__v4si)a, (__v4si)b);
+ return (__m128i)__builtin_ia32_phsubd128((__v4si)a, (__v4si)b);
}
static inline __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hsub_pi16(__m64 a, __m64 b)
{
- return (__m64)__builtin_ia32_psubw((__v4hi)a, (__v4hi)b);
+ return (__m64)__builtin_ia32_phsubw((__v4hi)a, (__v4hi)b);
}
static inline __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hsub_pi32(__m64 a, __m64 b)
{
- return (__m64)__builtin_ia32_psubd((__v2si)a, (__v2si)b);
+ return (__m64)__builtin_ia32_phsubd((__v2si)a, (__v2si)b);
}
static inline __m128i __attribute__((__always_inline__, __nodebug__))
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index c104f63..7291f88 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -38,7 +38,8 @@ typedef float __m128 __attribute__((__vector_size__(16)));
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_add_ss(__m128 a, __m128 b)
{
- return __builtin_ia32_addss(a, b);
+ a[0] += b[0];
+ return a;
}
static inline __m128 __attribute__((__always_inline__, __nodebug__))
@@ -50,7 +51,8 @@ _mm_add_ps(__m128 a, __m128 b)
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sub_ss(__m128 a, __m128 b)
{
- return __builtin_ia32_subss(a, b);
+ a[0] -= b[0];
+ return a;
}
static inline __m128 __attribute__((__always_inline__, __nodebug__))
@@ -62,7 +64,8 @@ _mm_sub_ps(__m128 a, __m128 b)
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_mul_ss(__m128 a, __m128 b)
{
- return __builtin_ia32_mulss(a, b);
+ a[0] *= b[0];
+ return a;
}
static inline __m128 __attribute__((__always_inline__, __nodebug__))
@@ -74,7 +77,8 @@ _mm_mul_ps(__m128 a, __m128 b)
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_div_ss(__m128 a, __m128 b)
{
- return __builtin_ia32_divss(a, b);
+ a[0] /= b[0];
+ return a;
}
static inline __m128 __attribute__((__always_inline__, __nodebug__))
@@ -146,25 +150,29 @@ _mm_max_ps(__m128 a, __m128 b)
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_and_ps(__m128 a, __m128 b)
{
- return __builtin_ia32_andps(a, b);
+ typedef int __v4si __attribute__((__vector_size__(16)));
+ return (__m128)((__v4si)a & (__v4si)b);
}
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_andnot_ps(__m128 a, __m128 b)
{
- return __builtin_ia32_andnps(a, b);
+ typedef int __v4si __attribute__((__vector_size__(16)));
+ return (__m128)(~(__v4si)a & (__v4si)b);
}
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_or_ps(__m128 a, __m128 b)
{
- return __builtin_ia32_orps(a, b);
+ typedef int __v4si __attribute__((__vector_size__(16)));
+ return (__m128)((__v4si)a | (__v4si)b);
}
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_xor_ps(__m128 a, __m128 b)
{
- return __builtin_ia32_xorps(a, b);
+ typedef int __v4si __attribute__((__vector_size__(16)));
+  return (__m128)((__v4si)a ^ (__v4si)b);
}
static inline __m128 __attribute__((__always_inline__, __nodebug__))
@@ -389,12 +397,16 @@ _mm_cvtss_si32(__m128 a)
return __builtin_ia32_cvtss2si(a);
}
+#ifdef __x86_64__
+
static inline long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_si64(__m128 a)
{
return __builtin_ia32_cvtss2si64(a);
}
+#endif
+
static inline __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pi32(__m128 a)
{
@@ -404,13 +416,13 @@ _mm_cvtps_pi32(__m128 a)
static inline int __attribute__((__always_inline__, __nodebug__))
_mm_cvttss_si32(__m128 a)
{
- return __builtin_ia32_cvttss2si(a);
+ return a[0];
}
static inline long long __attribute__((__always_inline__, __nodebug__))
_mm_cvttss_si64(__m128 a)
{
- return __builtin_ia32_cvttss2si64(a);
+ return a[0];
}
static inline __m64 __attribute__((__always_inline__, __nodebug__))
@@ -422,7 +434,8 @@ _mm_cvttps_pi32(__m128 a)
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_ss(__m128 a, int b)
{
- return __builtin_ia32_cvtsi2ss(a, b);
+ a[0] = b;
+ return a;
}
#ifdef __x86_64__
@@ -430,7 +443,8 @@ _mm_cvtsi32_ss(__m128 a, int b)
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_ss(__m128 a, long long b)
{
- return __builtin_ia32_cvtsi642ss(a, b);
+ a[0] = b;
+ return a;
}
#endif
@@ -456,6 +470,13 @@ _mm_loadh_pi(__m128 a, __m64 const *p)
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadl_pi(__m128 a, __m64 const *p)
{
+#if 0
+ // FIXME: This should work, but gives really crappy code at the moment
+ __m128 b;
+ b[0] = *(float*)p;
+ b[1] = *((float*)p+1);
+ return __builtin_shufflevector(a, b, 0, 1, 4, 5);
+#endif
return __builtin_ia32_loadlps(a, (__v2si *)p);
}
@@ -604,26 +625,17 @@ _mm_sfence(void)
static inline int __attribute__((__always_inline__, __nodebug__))
_mm_extract_pi16(__m64 a, int n)
{
- /* FIXME:
- * This should force n to be an immediate.
- * This does not use the PEXTRW instruction. From looking at the LLVM source, the
- instruction doesn't seem to be hooked up.
- * The code could probably be made better :)
- */
__v4hi b = (__v4hi)a;
- return b[(n == 0) ? 0 : (n == 1 ? 1 : (n == 2 ? 2 : 3))];
+ return (unsigned short)b[n & 3];
}
-/* FIXME: Implement this. We could add a __builtin_insertelement function that's similar to
- the already existing __builtin_shufflevector.
-*/
-/*
static inline __m64 __attribute__((__always_inline__, __nodebug__))
_mm_insert_pi16(__m64 a, int d, int n)
{
- return (__m64){ 0LL };
+ __v4hi b = (__v4hi)a;
+ b[n & 3] = d;
+ return (__m64)b;
}
-*/
static inline __m64 __attribute__((__always_inline__, __nodebug__))
_mm_max_pi16(__m64 a, __m64 b)
@@ -661,7 +673,10 @@ _mm_mulhi_pu16(__m64 a, __m64 b)
return (__m64)__builtin_ia32_pmulhuw((__v4hi)a, (__v4hi)b);
}
-#define _mm_shuffle_pi16(a, n) ((__m64)__builtin_ia32_pshufw((__v4hi)a, n))
+#define _mm_shuffle_pi16(a, n) \
+ ((__m64)__builtin_shufflevector((__v4hi)(a), (__v4hi) {0}, \
+ (n) & 0x3, ((n) & 0xc) >> 2, \
+ ((n) & 0x30) >> 4, ((n) & 0xc0) >> 6))
static inline void __attribute__((__always_inline__, __nodebug__))
_mm_maskmove_si64(__m64 d, __m64 n, char *p)
@@ -699,7 +714,10 @@ _mm_setcsr(unsigned int i)
__builtin_ia32_ldmxcsr(i);
}
-#define _mm_shuffle_ps(a, b, mask) (__builtin_ia32_shufps(a, b, mask))
+#define _mm_shuffle_ps(a, b, mask) \
+ (__builtin_shufflevector(a, b, (mask) & 0x3, ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 4, \
+ (((mask) & 0xc0) >> 6) + 4))
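Similarly for the SSE shuffle above (a usage sketch): the low two selectors
index into a, the high two into b.

    /* __m128 r = _mm_shuffle_ps(a, b, 0xE4);  r = { a[0], a[1], b[2], b[3] } */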
static inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_ps(__m128 a, __m128 b)
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index 0324c0b..4d10974 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -691,7 +691,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
  // character constants are not sign extended in this implementation:
// '\xFF\xFF' = 65536 and '\x0\xFF' = 255, which matches GCC.
if (!IsWide && NumCharsSoFar == 1 && (Value & 128) &&
- PP.getTargetInfo().isCharSigned())
+ PP.getLangOptions().CharIsSigned)
Value = (signed char)Value;
}
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index 709e316..c98acc4 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -232,7 +232,7 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// Set the value.
Val = Literal.getValue();
// Set the signedness.
- Val.setIsUnsigned(!TI.isCharSigned());
+ Val.setIsUnsigned(!PP.getLangOptions().CharIsSigned);
if (Result.Val.getBitWidth() > Val.getBitWidth()) {
Result.Val = Val.extend(Result.Val.getBitWidth());
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index 3014f95..cb7fe58 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -891,6 +891,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
// Install the declarator into interfaceDecl.
DeclPtrTy Field = Actions.ActOnIvar(CurScope,
DS.getSourceRange().getBegin(),
+ interfaceDecl,
FD.D, FD.BitfieldSize, visibility);
AllIvarDecls.push_back(Field);
}
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index 94695e4..58c729a 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -100,6 +100,12 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP, Token &PackTok) {
return;
}
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eom)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) << "pack";
+ return;
+ }
+
SourceLocation RParenLoc = Tok.getLocation();
Actions.ActOnPragmaPack(Kind, Name, Alignment.release(), PackLoc,
LParenLoc, RParenLoc);
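Effect of the added end-of-directive check (hypothetical source):

    #pragma pack(8)        /* accepted                                    */
    #pragma pack(8) junk   /* now diagnosed: extra tokens, pragma ignored */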
@@ -172,7 +178,14 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP, Token &UnusedTok) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_unused_expected_punc);
return;
}
-
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eom)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) <<
+ "unused";
+ return;
+ }
+
// Verify that we have a location for the right parenthesis.
assert(RParenLoc.isValid() && "Valid '#pragma unused' must have ')'");
assert(!Ex.empty() && "Valid '#pragma unused' must have arguments");
@@ -180,3 +193,45 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP, Token &UnusedTok) {
// Perform the action to handle the pragma.
Actions.ActOnPragmaUnused(&Ex[0], Ex.size(), UnusedLoc, LParenLoc, RParenLoc);
}
+
+// #pragma weak identifier
+// #pragma weak identifier '=' identifier
+void PragmaWeakHandler::HandlePragma(Preprocessor &PP, Token &WeakTok) {
+ // FIXME: Should we be expanding macros here? My guess is no.
+ SourceLocation WeakLoc = WeakTok.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) << "weak";
+ return;
+ }
+
+ IdentifierInfo *WeakName = Tok.getIdentifierInfo(), *AliasName = 0;
+ SourceLocation WeakNameLoc = Tok.getLocation(), AliasNameLoc;
+
+ PP.Lex(Tok);
+ if (Tok.is(tok::equal)) {
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "weak";
+ return;
+ }
+ AliasName = Tok.getIdentifierInfo();
+ AliasNameLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ }
+
+ if (Tok.isNot(tok::eom)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) << "weak";
+ return;
+ }
+
+ if (AliasName) {
+ Actions.ActOnPragmaWeakAlias(WeakName, AliasName, WeakLoc, WeakNameLoc,
+ AliasNameLoc);
+ } else {
+ Actions.ActOnPragmaWeakID(WeakName, WeakLoc, WeakNameLoc);
+ }
+}
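The source forms handled above (illustrative):

    #pragma weak fn              /* -> ActOnPragmaWeakID                 */
    #pragma weak alias = target  /* -> ActOnPragmaWeakAlias              */
    #pragma weak fn junk         /* diagnosed: extra tokens after pragma */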
diff --git a/lib/Parse/ParsePragma.h b/lib/Parse/ParsePragma.h
index 31b2a5f..39c86ee 100644
--- a/lib/Parse/ParsePragma.h
+++ b/lib/Parse/ParsePragma.h
@@ -39,6 +39,15 @@ public:
virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
};
+class PragmaWeakHandler : public PragmaHandler {
+ Action &Actions;
+public:
+ PragmaWeakHandler(const IdentifierInfo *N, Action &A)
+ : PragmaHandler(N), Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+};
+
} // end namespace clang
#endif
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index 758b662..f041d7d 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -550,6 +550,8 @@ Parser::OwningStmtResult Parser::ParseIfStatement() {
if (ParseParenExprOrCondition(CondExp))
return StmtError();
+ FullExprArg FullCondExp(Actions.FullExpr(CondExp));
+
// C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
// there is no compound stmt. C90 does not have this clause. We only do this
// if the body isn't a compound statement to avoid push/pop in common cases.
@@ -631,7 +633,7 @@ Parser::OwningStmtResult Parser::ParseIfStatement() {
if (ElseStmt.isInvalid())
ElseStmt = Actions.ActOnNullStmt(ElseStmtLoc);
- return Actions.ActOnIfStmt(IfLoc, Actions.FullExpr(CondExp), move(ThenStmt),
+ return Actions.ActOnIfStmt(IfLoc, FullCondExp, move(ThenStmt),
ElseLoc, move(ElseStmt));
}
@@ -752,6 +754,8 @@ Parser::OwningStmtResult Parser::ParseWhileStatement() {
if (ParseParenExprOrCondition(Cond))
return StmtError();
+ FullExprArg FullCond(Actions.FullExpr(Cond));
+
// C99 6.8.5p5 - In C99, the body of the if statement is a scope, even if
// there is no compound stmt. C90 does not have this clause. We only do this
// if the body isn't a compound statement to avoid push/pop in common cases.
@@ -776,7 +780,7 @@ Parser::OwningStmtResult Parser::ParseWhileStatement() {
if (Cond.isInvalid() || Body.isInvalid())
return StmtError();
- return Actions.ActOnWhileStmt(WhileLoc, Actions.FullExpr(Cond), move(Body));
+ return Actions.ActOnWhileStmt(WhileLoc, FullCond, move(Body));
}
/// ParseDoStatement
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index 1c2e8a6..a2a66f9 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -39,6 +39,10 @@ Parser::Parser(Preprocessor &pp, Action &actions)
PragmaUnusedHandler(&PP.getIdentifierTable().get("unused"), actions,
*this));
PP.AddPragmaHandler(0, UnusedHandler.get());
+
+ WeakHandler.reset(new
+ PragmaWeakHandler(&PP.getIdentifierTable().get("weak"), actions));
+ PP.AddPragmaHandler(0, WeakHandler.get());
}
/// If a crash happens while the parser is active, print out a line indicating
@@ -288,6 +292,8 @@ Parser::~Parser() {
PackHandler.reset();
PP.RemovePragmaHandler(0, UnusedHandler.get());
UnusedHandler.reset();
+ PP.RemovePragmaHandler(0, WeakHandler.get());
+ WeakHandler.reset();
}
/// Initialize - Warm up the parser.
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
index 321dac1..85c67df 100644
--- a/lib/Sema/CMakeLists.txt
+++ b/lib/Sema/CMakeLists.txt
@@ -23,6 +23,7 @@ add_clang_library(clangSema
SemaOverload.cpp
SemaStmt.cpp
SemaTemplate.cpp
+ SemaTemplateDeduction.cpp
SemaTemplateInstantiate.cpp
SemaTemplateInstantiateDecl.cpp
SemaTemplateInstantiateExpr.cpp
diff --git a/lib/Sema/Sema.h b/lib/Sema/Sema.h
index c428d29..d3bfef6 100644
--- a/lib/Sema/Sema.h
+++ b/lib/Sema/Sema.h
@@ -20,6 +20,7 @@
#include "SemaOverload.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/Parse/Action.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallVector.h"
@@ -471,6 +472,7 @@ public:
Declarator *D = 0);
virtual DeclPtrTy ActOnIvar(Scope *S, SourceLocation DeclStart,
+ DeclPtrTy IntfDecl,
Declarator &D, ExprTy *BitfieldWidth,
tok::ObjCKeywordKind visibility);
@@ -1045,7 +1047,7 @@ public:
LookupResult LookupName(Scope *S, DeclarationName Name,
LookupNameKind NameKind,
bool RedeclarationOnly = false,
- bool AllowBuiltinCreation = true,
+ bool AllowBuiltinCreation = false,
SourceLocation Loc = SourceLocation());
LookupResult LookupQualifiedName(DeclContext *LookupCtx, DeclarationName Name,
LookupNameKind NameKind,
@@ -1054,7 +1056,7 @@ public:
DeclarationName Name,
LookupNameKind NameKind,
bool RedeclarationOnly = false,
- bool AllowBuiltinCreation = true,
+ bool AllowBuiltinCreation = false,
SourceLocation Loc = SourceLocation());
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II);
@@ -1629,6 +1631,12 @@ public:
TypeTy *Ty,
SourceLocation RParen);
+ /// MaybeCreateCXXExprWithTemporaries - If the list of temporaries is
+ /// non-empty, creates a new CXXExprWithTemporaries expression.
+ /// Otherwise, just returns the passed-in expression.
+ Expr *MaybeCreateCXXExprWithTemporaries(Expr *SubExpr,
+ bool DestroyTemps = true);
+
virtual OwningExprResult ActOnFinishFullExpr(ExprArg Expr);
bool RequireCompleteDeclContext(const CXXScopeSpec &SS);
@@ -1971,7 +1979,7 @@ public:
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
SourceLocation RAngleLoc,
- llvm::SmallVectorImpl<TemplateArgument> &Converted);
+ TemplateArgumentListBuilder &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param, QualType Arg,
SourceLocation ArgLoc);
@@ -1980,7 +1988,7 @@ public:
bool CheckTemplateArgumentPointerToMember(Expr *Arg, NamedDecl *&Member);
bool CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *&Arg,
- llvm::SmallVectorImpl<TemplateArgument> *Converted = 0);
+ TemplateArgumentListBuilder *Converted = 0);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, DeclRefExpr *Arg);
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
@@ -2019,16 +2027,9 @@ public:
const IdentifierInfo &II,
SourceRange Range);
- bool DeduceTemplateArguments(QualType Param, QualType Arg,
- llvm::SmallVectorImpl<TemplateArgument> &Deduced);
- bool DeduceTemplateArguments(const TemplateArgument &Param,
- const TemplateArgument &Arg,
- llvm::SmallVectorImpl<TemplateArgument> &Deduced);
- bool DeduceTemplateArguments(const TemplateArgumentList &ParamList,
- const TemplateArgumentList &ArgList,
- llvm::SmallVectorImpl<TemplateArgument> &Deduced);
- bool DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
- const TemplateArgumentList &TemplateArgs);
+ TemplateArgumentList *
+ DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
@@ -2235,7 +2236,7 @@ public:
QualType InstantiateType(QualType T, const TemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
-
+
OwningExprResult InstantiateExpr(Expr *E,
const TemplateArgumentList &TemplateArgs);
@@ -2456,7 +2457,19 @@ public:
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
-
+
+ /// ActOnPragmaWeakID - Called on well formed #pragma weak ident.
+ virtual void ActOnPragmaWeakID(IdentifierInfo* WeakName,
+ SourceLocation PragmaLoc,
+ SourceLocation WeakNameLoc);
+
+ /// ActOnPragmaWeakAlias - Called on well formed #pragma weak ident = ident.
+ virtual void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation WeakNameLoc,
+ SourceLocation AliasNameLoc);
+
/// getPragmaPackAlignment() - Return the current alignment as specified by
/// the current #pragma pack directive, or 0 if none is currently active.
unsigned getPragmaPackAlignment() const;
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 959154c..c67af29 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -575,10 +575,11 @@ void Sema::MergeTypeDefDecl(TypedefDecl *New, Decl *OldD) {
// If we have a redefinition of a typedef in C, emit a warning. This warning
// is normally mapped to an error, but can be controlled with
- // -Wtypedef-redefinition. If either the original was in a system header,
- // don't emit this for compatibility with GCC.
+ // -Wtypedef-redefinition. If either the original or the redefinition is
+ // in a system header, don't emit this for compatibility with GCC.
if (PP.getDiagnostics().getSuppressSystemWarnings() &&
- Context.getSourceManager().isInSystemHeader(Old->getLocation()))
+ (Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
+ Context.getSourceManager().isInSystemHeader(New->getLocation())))
return;
Diag(New->getLocation(), diag::warn_redefinition_of_typedef)
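
A sketch of the case the widened check now also covers (the header name is hypothetical): the original typedef lives in user code and the redefinition comes from a system header, so only the new declaration is in a system header, yet the warning is still suppressed for GCC compatibility.

    /* user code */
    typedef unsigned long my_size_t;

    /* imagine the following line inside <sys/hypothetical.h>: */
    typedef unsigned long my_size_t;  /* redefinition: warning now suppressed */
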
@@ -3913,6 +3914,7 @@ TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
/// in order to create an IvarDecl object for it.
Sema::DeclPtrTy Sema::ActOnIvar(Scope *S,
SourceLocation DeclStart,
+ DeclPtrTy IntfDecl,
Declarator &D, ExprTy *BitfieldWidth,
tok::ObjCKeywordKind Visibility) {
@@ -3951,14 +3953,28 @@ Sema::DeclPtrTy Sema::ActOnIvar(Scope *S,
ObjCIvarDecl::AccessControl ac =
Visibility != tok::objc_not_keyword ? TranslateIvarVisibility(Visibility)
: ObjCIvarDecl::None;
-
+ // Must set ivar's DeclContext to its enclosing interface.
+ Decl *EnclosingDecl = IntfDecl.getAs<Decl>();
+ DeclContext *EnclosingContext;
+ if (ObjCImplementationDecl *IMPDecl =
+ dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
+ // Case of ivar declared in an implementation. Context is that of its class.
+ ObjCInterfaceDecl* IDecl = IMPDecl->getClassInterface();
+ assert(IDecl && "No class- ActOnIvar");
+ EnclosingContext = cast_or_null<DeclContext>(IDecl);
+ }
+ else
+ EnclosingContext = dyn_cast<DeclContext>(EnclosingDecl);
+ assert(EnclosingContext && "null DeclContext for ivar - ActOnIvar");
+
// Construct the decl.
- ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context, CurContext, Loc, II, T,ac,
+ ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context,
+ EnclosingContext, Loc, II, T,ac,
(Expr *)BitfieldWidth);
if (II) {
NamedDecl *PrevDecl = LookupName(S, II, LookupMemberName, true);
- if (PrevDecl && isDeclInScope(PrevDecl, CurContext, S)
+ if (PrevDecl && isDeclInScope(PrevDecl, EnclosingContext, S)
&& !isa<TagDecl>(PrevDecl)) {
Diag(Loc, diag::err_duplicate_member) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
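
Illustrative Objective-C this EnclosingContext logic is aimed at (class name made up): an ivar spelled inside an @implementation now gets the class's interface as its semantic DeclContext while staying lexically inside the implementation.

    @interface Counter {
      int count;
    }
    @end

    @implementation Counter
    {
      int step;  // semantic context: Counter's interface; lexical: this @implementation
    }
    @end
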
@@ -4099,7 +4115,11 @@ void Sema::ActOnFields(Scope* S,
if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
ID->setIVarList(ClsFields, RecFields.size(), Context);
ID->setLocEnd(RBrac);
-
+ // Add ivars to the class's DeclContext.
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ ClsFields[i]->setLexicalDeclContext(ID);
+ ID->addDecl(Context, ClsFields[i]);
+ }
// Must enforce the rule that ivars in the base classes may not be
// duplicates.
if (ID->getSuperClass()) {
@@ -4120,12 +4140,10 @@ void Sema::ActOnFields(Scope* S,
} else if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
- for (unsigned I = 0, N = RecFields.size(); I != N; ++I) {
- // FIXME: Set the DeclContext correctly when we build the
- // declarations.
+ for (unsigned I = 0, N = RecFields.size(); I != N; ++I)
+ // An ivar declared in an @implementation never belongs to the
+ // implementation; it is only in the implementation's lexical context.
ClsFields[I]->setLexicalDeclContext(IMPDecl);
- IMPDecl->addDecl(Context, ClsFields[I]);
- }
CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
}
}
@@ -4413,3 +4431,34 @@ Sema::DeclPtrTy Sema::ActOnFileScopeAsmDecl(SourceLocation Loc,
CurContext->addDecl(Context, New);
return DeclPtrTy::make(New);
}
+
+void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc) {
+ Decl *PrevDecl = LookupName(TUScope, Name, LookupOrdinaryName);
+
+ // FIXME: This implementation is an ugly hack!
+ if (PrevDecl) {
+ PrevDecl->addAttr(::new (Context) WeakAttr());
+ return;
+ }
+ Diag(PragmaLoc, diag::err_unsupported_pragma_weak);
+ return;
+}
+
+void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc,
+ SourceLocation AliasNameLoc) {
+ Decl *PrevDecl = LookupName(TUScope, Name, LookupOrdinaryName);
+
+ // FIXME: This implementation is an ugly hack!
+ if (PrevDecl) {
+ PrevDecl->addAttr(::new (Context) AliasAttr(AliasName->getName()));
+ PrevDecl->addAttr(::new (Context) WeakAttr());
+ return;
+ }
+ Diag(PragmaLoc, diag::err_unsupported_pragma_weak);
+ return;
+}
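
As the FIXMEs admit, both callbacks work by retrofitting attributes onto a previously seen declaration. Roughly, the alias form behaves like the attribute spelling below; a sketch with made-up names, not an exact equivalence:

    void impl(void) { }
    void api(void);

    #pragma weak api = impl
    /* ...is treated approximately as if 'api' had been declared: */
    void api(void) __attribute__((weak, alias("impl")));
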
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index f13179f..b59ac87 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -1357,7 +1357,7 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
if (!Constructor->isInvalidDecl() &&
((Constructor->getNumParams() == 1) ||
(Constructor->getNumParams() > 1 &&
- Constructor->getParamDecl(1)->getDefaultArg() != 0))) {
+ Constructor->getParamDecl(1)->hasDefaultArg()))) {
QualType ParamType = Constructor->getParamDecl(0)->getType();
QualType ClassTy = Context.getTagDeclType(ClassDecl);
if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
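
The condition above detects invalid copy constructors; hasDefaultArg() also covers default arguments that exist but have not been instantiated yet, which a raw getDefaultArg() pointer test missed. The shape being diagnosed, sketched:

    struct X {
      X(X other);             // invalid: a copy constructor must take a reference
      X(X other, int i = 0);  // also flagged: every later parameter has a default
    };
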
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index 8f58034..2500a8f 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -2113,27 +2113,6 @@ bool Sema::CheckObjCDeclScope(Decl *D) {
return true;
}
-/// Collect the instance variables declared in an Objective-C object. Used in
-/// the creation of structures from objects using the @defs directive.
-/// FIXME: This should be consolidated with CollectObjCIvars as it is also
-/// part of the AST generation logic of @defs.
-static void CollectIvars(ObjCInterfaceDecl *Class, RecordDecl *Record,
- ASTContext& Ctx,
- llvm::SmallVectorImpl<Sema::DeclPtrTy> &ivars) {
- if (Class->getSuperClass())
- CollectIvars(Class->getSuperClass(), Record, Ctx, ivars);
-
- // For each ivar, create a fresh ObjCAtDefsFieldDecl.
- for (ObjCInterfaceDecl::ivar_iterator I = Class->ivar_begin(),
- E = Class->ivar_end(); I != E; ++I) {
- ObjCIvarDecl* ID = *I;
- Decl *FD = ObjCAtDefsFieldDecl::Create(Ctx, Record, ID->getLocation(),
- ID->getIdentifier(), ID->getType(),
- ID->getBitWidth());
- ivars.push_back(Sema::DeclPtrTy::make(FD));
- }
-}
-
/// Called whenever @defs(ClassName) is encountered in the source. Inserts the
/// instance variables of ClassName into Decls.
void Sema::ActOnDefs(Scope *S, DeclPtrTy TagD, SourceLocation DeclStart,
@@ -2151,7 +2130,17 @@ void Sema::ActOnDefs(Scope *S, DeclPtrTy TagD, SourceLocation DeclStart,
}
// Collect the instance variables
- CollectIvars(Class, dyn_cast<RecordDecl>(TagD.getAs<Decl>()), Context, Decls);
+ llvm::SmallVector<FieldDecl*, 32> RecFields;
+ Context.CollectObjCIvars(Class, RecFields);
+ // For each ivar, create a fresh ObjCAtDefsFieldDecl.
+ for (unsigned i = 0; i < RecFields.size(); i++) {
+ FieldDecl* ID = RecFields[i];
+ RecordDecl *Record = dyn_cast<RecordDecl>(TagD.getAs<Decl>());
+ Decl *FD = ObjCAtDefsFieldDecl::Create(Context, Record, ID->getLocation(),
+ ID->getIdentifier(), ID->getType(),
+ ID->getBitWidth());
+ Decls.push_back(Sema::DeclPtrTy::make(FD));
+ }
// Introduce all of these fields into the appropriate scope.
for (llvm::SmallVectorImpl<DeclPtrTy>::iterator D = Decls.begin();
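
For context, @defs is the GCC Objective-C extension being handled here: it splices a class's instance variables into a plain C struct, and routing it through CollectObjCIvars means superclass ivars are gathered the same way as elsewhere. Illustrative (class name made up):

    @interface MyClass {
      id isa;
      int value;
    }
    @end

    struct MyClass_layout {
      @defs(MyClass)   /* expands to the ivars above: id isa; int value; */
    };
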
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 65018da..ed4ac55 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -1588,16 +1588,26 @@ Expr *Sema::RemoveOutermostTemporaryBinding(Expr *E) {
return E;
}
+Expr *Sema::MaybeCreateCXXExprWithTemporaries(Expr *SubExpr,
+ bool DestroyTemps) {
+ assert(SubExpr && "sub expression can't be null!");
+
+ if (ExprTemporaries.empty())
+ return SubExpr;
+
+ Expr *E = CXXExprWithTemporaries::Create(Context, SubExpr,
+ &ExprTemporaries[0],
+ ExprTemporaries.size(),
+ DestroyTemps);
+ ExprTemporaries.clear();
+
+ return E;
+}
+
Sema::OwningExprResult Sema::ActOnFinishFullExpr(ExprArg Arg) {
Expr *FullExpr = Arg.takeAs<Expr>();
-
- if (FullExpr && !ExprTemporaries.empty()) {
- // Create a cleanup expr.
- FullExpr = CXXExprWithTemporaries::Create(Context, FullExpr,
- &ExprTemporaries[0],
- ExprTemporaries.size());
- ExprTemporaries.clear();
- }
+ if (FullExpr)
+ FullExpr = MaybeCreateCXXExprWithTemporaries(FullExpr);
return Owned(FullExpr);
}
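
A sketch of what the extracted helper does for a caller: if building the full-expression registered any C++ temporaries, the expression is rewrapped so they are destroyed at the end of that full-expression; otherwise it is returned untouched. Illustrative source that exercises it:

    #include <cstddef>
    #include <string>

    std::size_t len() {
      // One full-expression; the std::string temporary recorded in
      // ExprTemporaries ends up owned by a CXXExprWithTemporaries node
      // created in ActOnFinishFullExpr via the helper above.
      return std::string("hello").size();
    }
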
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 782a0d8..f9176ca 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -750,6 +750,10 @@ static void CanonicalizeTemplateArguments(const TemplateArgument *TemplateArgs,
Canonical.reserve(NumTemplateArgs);
for (unsigned Idx = 0; Idx < NumTemplateArgs; ++Idx) {
switch (TemplateArgs[Idx].getKind()) {
+ case TemplateArgument::Null:
+ assert(false && "Should never see a NULL template argument here");
+ break;
+
case TemplateArgument::Expression:
// FIXME: Build canonical expression (!)
Canonical.push_back(TemplateArgs[Idx]);
@@ -765,11 +769,13 @@ static void CanonicalizeTemplateArguments(const TemplateArgument *TemplateArgs,
Canonical.push_back(TemplateArgument(SourceLocation(),
*TemplateArgs[Idx].getAsIntegral(),
TemplateArgs[Idx].getIntegralType()));
+ break;
case TemplateArgument::Type: {
QualType CanonType
= Context.getCanonicalType(TemplateArgs[Idx].getAsType());
Canonical.push_back(TemplateArgument(SourceLocation(), CanonType));
+ break;
}
}
}
@@ -805,7 +811,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Check that the template argument list is well-formed for this
// template.
- llvm::SmallVector<TemplateArgument, 16> ConvertedTemplateArgs;
+ TemplateArgumentListBuilder ConvertedTemplateArgs(Context);
if (CheckTemplateArgumentList(Template, TemplateLoc, LAngleLoc,
TemplateArgs, NumTemplateArgs, RAngleLoc,
ConvertedTemplateArgs))
@@ -829,15 +835,16 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// template<typename T, typename U = T> struct A;
TemplateName CanonName = Context.getCanonicalTemplateName(Name);
CanonType = Context.getTemplateSpecializationType(CanonName,
- &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size());
+ ConvertedTemplateArgs.getFlatArgumentList(),
+ ConvertedTemplateArgs.flatSize());
} else if (ClassTemplateDecl *ClassTemplate
= dyn_cast<ClassTemplateDecl>(Template)) {
// Find the class template specialization declaration that
// corresponds to these arguments.
llvm::FoldingSetNodeID ID;
- ClassTemplateSpecializationDecl::Profile(ID, &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size());
+ ClassTemplateSpecializationDecl::Profile(ID,
+ ConvertedTemplateArgs.getFlatArgumentList(),
+ ConvertedTemplateArgs.flatSize());
void *InsertPos = 0;
ClassTemplateSpecializationDecl *Decl
= ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
@@ -846,12 +853,10 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// specialization. Create the canonical declaration and add it to
// the set of specializations.
Decl = ClassTemplateSpecializationDecl::Create(Context,
- ClassTemplate->getDeclContext(),
- TemplateLoc,
- ClassTemplate,
- &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size(),
- 0);
+ ClassTemplate->getDeclContext(),
+ TemplateLoc,
+ ClassTemplate,
+ ConvertedTemplateArgs, 0);
ClassTemplate->getSpecializations().InsertNode(Decl, InsertPos);
Decl->setLexicalDeclContext(CurContext);
}
@@ -949,7 +954,7 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs,
SourceLocation RAngleLoc,
- llvm::SmallVectorImpl<TemplateArgument> &Converted) {
+ TemplateArgumentListBuilder &Converted) {
TemplateParameterList *Params = Template->getTemplateParameters();
unsigned NumParams = Params->size();
unsigned NumArgs = NumTemplateArgs;
@@ -998,13 +1003,13 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// on the previously-computed template arguments.
if (ArgType->isDependentType()) {
InstantiatingTemplate Inst(*this, TemplateLoc,
- Template, &Converted[0],
- Converted.size(),
+ Template, Converted.getFlatArgumentList(),
+ Converted.flatSize(),
SourceRange(TemplateLoc, RAngleLoc));
- TemplateArgumentList TemplateArgs(Context, &Converted[0],
- Converted.size(),
- /*CopyArgs=*/false);
+ TemplateArgumentList TemplateArgs(Context, Converted,
+ /*CopyArgs=*/false,
+ /*FlattenArgs=*/false);
ArgType = InstantiateType(ArgType, TemplateArgs,
TTP->getDefaultArgumentLoc(),
TTP->getDeclName());
@@ -1069,13 +1074,13 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
if (NTTPType->isDependentType()) {
// Instantiate the type of the non-type template parameter.
InstantiatingTemplate Inst(*this, TemplateLoc,
- Template, &Converted[0],
- Converted.size(),
+ Template, Converted.getFlatArgumentList(),
+ Converted.flatSize(),
SourceRange(TemplateLoc, RAngleLoc));
- TemplateArgumentList TemplateArgs(Context, &Converted[0],
- Converted.size(),
- /*CopyArgs=*/false);
+ TemplateArgumentList TemplateArgs(Context, Converted,
+ /*CopyArgs=*/false,
+ /*FlattenArgs=*/false);
NTTPType = InstantiateType(NTTPType, TemplateArgs,
NTTP->getLocation(),
NTTP->getDeclName());
@@ -1092,6 +1097,10 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
}
switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ assert(false && "Should never see a NULL template argument here");
+ break;
+
case TemplateArgument::Expression: {
Expr *E = Arg.getAsExpr();
if (CheckTemplateArgument(NTTP, NTTPType, E, &Converted))
@@ -1131,6 +1140,10 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
= cast<TemplateTemplateParmDecl>(*Param);
switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ assert(false && "Should never see a NULL template argument here");
+ break;
+
case TemplateArgument::Expression: {
Expr *ArgExpr = Arg.getAsExpr();
if (ArgExpr && isa<DeclRefExpr>(ArgExpr) &&
@@ -1379,7 +1392,7 @@ Sema::CheckTemplateArgumentPointerToMember(Expr *Arg, NamedDecl *&Member) {
/// of this argument will be added to the end of the Converted vector.
bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *&Arg,
- llvm::SmallVectorImpl<TemplateArgument> *Converted) {
+ TemplateArgumentListBuilder *Converted) {
SourceLocation StartLoc = Arg->getSourceRange().getBegin();
// If either the parameter has a dependent type or the argument is
@@ -2051,7 +2064,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagKind TK,
// Check that the template argument list is well-formed for this
// template.
- llvm::SmallVector<TemplateArgument, 16> ConvertedTemplateArgs;
+ TemplateArgumentListBuilder ConvertedTemplateArgs(Context);
if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc, LAngleLoc,
&TemplateArgs[0], TemplateArgs.size(),
RAngleLoc, ConvertedTemplateArgs))
@@ -2066,11 +2079,13 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagKind TK,
llvm::FoldingSetNodeID ID;
if (isPartialSpecialization)
// FIXME: Template parameter list matters, too
- ClassTemplatePartialSpecializationDecl::Profile(ID, &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size());
+ ClassTemplatePartialSpecializationDecl::Profile(ID,
+ ConvertedTemplateArgs.getFlatArgumentList(),
+ ConvertedTemplateArgs.flatSize());
else
- ClassTemplateSpecializationDecl::Profile(ID, &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size());
+ ClassTemplateSpecializationDecl::Profile(ID,
+ ConvertedTemplateArgs.getFlatArgumentList(),
+ ConvertedTemplateArgs.flatSize());
void *InsertPos = 0;
ClassTemplateSpecializationDecl *PrevDecl = 0;
@@ -2111,12 +2126,11 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagKind TK,
ClassTemplatePartialSpecializationDecl *Partial
= ClassTemplatePartialSpecializationDecl::Create(Context,
ClassTemplate->getDeclContext(),
- TemplateNameLoc,
- TemplateParams,
- ClassTemplate,
- &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size(),
- PrevPartial);
+ TemplateNameLoc,
+ TemplateParams,
+ ClassTemplate,
+ ConvertedTemplateArgs,
+ PrevPartial);
if (PrevPartial) {
ClassTemplate->getPartialSpecializations().RemoveNode(PrevPartial);
@@ -2132,9 +2146,8 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagKind TK,
= ClassTemplateSpecializationDecl::Create(Context,
ClassTemplate->getDeclContext(),
TemplateNameLoc,
- ClassTemplate,
- &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size(),
+ ClassTemplate,
+ ConvertedTemplateArgs,
PrevDecl);
if (PrevDecl) {
@@ -2255,9 +2268,9 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
// Check that the template argument list is well-formed for this
// template.
- llvm::SmallVector<TemplateArgument, 16> ConvertedTemplateArgs;
+ TemplateArgumentListBuilder ConvertedTemplateArgs(Context);
if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc, LAngleLoc,
- &TemplateArgs[0], TemplateArgs.size(),
+ TemplateArgs.data(), TemplateArgs.size(),
RAngleLoc, ConvertedTemplateArgs))
return true;
@@ -2268,8 +2281,9 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
// Find the class template specialization declaration that
// corresponds to these arguments.
llvm::FoldingSetNodeID ID;
- ClassTemplateSpecializationDecl::Profile(ID, &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size());
+ ClassTemplateSpecializationDecl::Profile(ID,
+ ConvertedTemplateArgs.getFlatArgumentList(),
+ ConvertedTemplateArgs.flatSize());
void *InsertPos = 0;
ClassTemplateSpecializationDecl *PrevDecl
= ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
@@ -2312,9 +2326,7 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
ClassTemplate->getDeclContext(),
TemplateNameLoc,
ClassTemplate,
- &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size(),
- 0);
+ ConvertedTemplateArgs, 0);
Specialization->setLexicalDeclContext(CurContext);
CurContext->addDecl(Context, Specialization);
return DeclPtrTy::make(Specialization);
@@ -2340,9 +2352,7 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
ClassTemplate->getDeclContext(),
TemplateNameLoc,
ClassTemplate,
- &ConvertedTemplateArgs[0],
- ConvertedTemplateArgs.size(),
- 0);
+ ConvertedTemplateArgs, 0);
ClassTemplate->getSpecializations().InsertNode(Specialization,
InsertPos);
@@ -2357,7 +2367,7 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
// arguments in the specialization.
QualType WrittenTy
= Context.getTemplateSpecializationType(Name,
- &TemplateArgs[0],
+ TemplateArgs.data(),
TemplateArgs.size(),
Context.getTypeDeclType(Specialization));
Specialization->setTypeAsWritten(WrittenTy);
@@ -2563,89 +2573,3 @@ Sema::CheckTypenameType(NestedNameSpecifier *NNS, const IdentifierInfo &II,
<< Name;
return QualType();
}
-
-// FIXME: Move to SemaTemplateDeduction.cpp
-bool
-Sema::DeduceTemplateArguments(QualType Param, QualType Arg,
- llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
- // We only want to look at the canonical types, since typedefs and
- // sugar are not part of template argument deduction.
- Param = Context.getCanonicalType(Param);
- Arg = Context.getCanonicalType(Arg);
-
- // If the parameter type is not dependent, just compare the types
- // directly.
- if (!Param->isDependentType())
- return Param == Arg;
-
- // FIXME: Use a visitor or switch to handle all of the kinds of
- // types that the parameter may be.
- if (const TemplateTypeParmType *TemplateTypeParm
- = Param->getAsTemplateTypeParmType()) {
- (void)TemplateTypeParm; // FIXME: use this
- // The argument type can not be less qualified than the parameter
- // type.
- if (Param.isMoreQualifiedThan(Arg))
- return false;
-
- unsigned Quals = Arg.getCVRQualifiers() & ~Param.getCVRQualifiers();
- QualType DeducedType = Arg.getQualifiedType(Quals);
- // FIXME: actually save the deduced type, and check that this
- // deduction is consistent.
- return true;
- }
-
- if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
- return false;
-
- if (const PointerType *PointerParam = Param->getAsPointerType()) {
- const PointerType *PointerArg = Arg->getAsPointerType();
- if (!PointerArg)
- return false;
-
- return DeduceTemplateArguments(PointerParam->getPointeeType(),
- PointerArg->getPointeeType(),
- Deduced);
- }
-
- // FIXME: Many more cases to go (to go).
- return false;
-}
-
-bool
-Sema::DeduceTemplateArguments(const TemplateArgument &Param,
- const TemplateArgument &Arg,
- llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
- assert(Param.getKind() == Arg.getKind() &&
- "Template argument kind mismatch during deduction");
- switch (Param.getKind()) {
- case TemplateArgument::Type:
- return DeduceTemplateArguments(Param.getAsType(), Arg.getAsType(),
- Deduced);
-
- default:
- return false;
- }
-}
-
-bool
-Sema::DeduceTemplateArguments(const TemplateArgumentList &ParamList,
- const TemplateArgumentList &ArgList,
- llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
- assert(ParamList.size() == ArgList.size());
- for (unsigned I = 0, N = ParamList.size(); I != N; ++I) {
- if (!DeduceTemplateArguments(ParamList[I], ArgList[I], Deduced))
- return false;
- }
- return true;
-}
-
-
-bool
-Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
- const TemplateArgumentList &TemplateArgs) {
- llvm::SmallVector<TemplateArgument, 4> Deduced;
- Deduced.resize(Partial->getTemplateParameters()->size());
- return DeduceTemplateArguments(Partial->getTemplateArgs(), TemplateArgs,
- Deduced);
-}
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
new file mode 100644
index 0000000..812b319
--- /dev/null
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -0,0 +1,395 @@
+//===------- SemaTemplateDeduction.cpp - Template Argument Deduction ------===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file implements C++ template argument deduction.
+//
+//===----------------------------------------------------------------------===/
+
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Parse/DeclSpec.h"
+#include "llvm/Support/Compiler.h"
+using namespace clang;
+
+/// \brief If the given expression is of a form that permits the deduction
+/// of a non-type template parameter, return the declaration of that
+/// non-type template parameter.
+static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) {
+ if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
+ E = IC->getSubExpr();
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
+
+ return 0;
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given constant.
+///
+/// \returns true if deduction succeeded, false otherwise.
+static bool DeduceNonTypeTemplateArgument(ASTContext &Context,
+ NonTypeTemplateParmDecl *NTTP,
+ llvm::APInt Value,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument with depth > 0");
+
+ if (Deduced[NTTP->getIndex()].isNull()) {
+ Deduced[NTTP->getIndex()] = TemplateArgument(SourceLocation(),
+ llvm::APSInt(Value),
+ NTTP->getType());
+ return true;
+ }
+
+ if (Deduced[NTTP->getIndex()].getKind() != TemplateArgument::Integral)
+ return false;
+
+ // If the template argument was previously deduced to a negative value,
+ // then our deduction fails.
+ const llvm::APSInt *PrevValuePtr = Deduced[NTTP->getIndex()].getAsIntegral();
+ assert(PrevValuePtr && "Not an integral template argument?");
+ if (PrevValuePtr->isSigned() && PrevValuePtr->isNegative())
+ return false;
+
+ llvm::APInt PrevValue = *PrevValuePtr;
+ if (Value.getBitWidth() > PrevValue.getBitWidth())
+ PrevValue.zext(Value.getBitWidth());
+ else if (Value.getBitWidth() < PrevValue.getBitWidth())
+ Value.zext(PrevValue.getBitWidth());
+ return Value == PrevValue;
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given type- or value-dependent expression.
+///
+/// \returns true if deduction succeeded, false otherwise.
+static bool DeduceNonTypeTemplateArgument(ASTContext &Context,
+ NonTypeTemplateParmDecl *NTTP,
+ Expr *Value,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument with depth > 0");
+ assert((Value->isTypeDependent() || Value->isValueDependent()) &&
+ "Expression template argument must be type- or value-dependent.");
+
+ if (Deduced[NTTP->getIndex()].isNull()) {
+ // FIXME: Clone the Value?
+ Deduced[NTTP->getIndex()] = TemplateArgument(Value);
+ return true;
+ }
+
+ if (Deduced[NTTP->getIndex()].getKind() == TemplateArgument::Integral) {
+ // Okay, we deduced a constant in one case and a dependent expression
+ // in another case. FIXME: Later, we will check that instantiating the
+ // dependent expression gives us the constant value.
+ return true;
+ }
+
+ // FIXME: Compare the expressions for equality!
+ return true;
+}
+
+static bool DeduceTemplateArguments(ASTContext &Context, QualType Param,
+ QualType Arg,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
+ // We only want to look at the canonical types, since typedefs and
+ // sugar are not part of template argument deduction.
+ Param = Context.getCanonicalType(Param);
+ Arg = Context.getCanonicalType(Arg);
+
+ // If the parameter type is not dependent, just compare the types
+ // directly.
+ if (!Param->isDependentType())
+ return Param == Arg;
+
+ // C++ [temp.deduct.type]p9:
+ //
+ // A template type argument T, a template template argument TT or a
+ // template non-type argument i can be deduced if P and A have one of
+ // the following forms:
+ //
+ // T
+ // cv-list T
+ if (const TemplateTypeParmType *TemplateTypeParm
+ = Param->getAsTemplateTypeParmType()) {
+ // The argument type can not be less qualified than the parameter
+ // type.
+ if (Param.isMoreQualifiedThan(Arg))
+ return false;
+
+ assert(TemplateTypeParm->getDepth() == 0 && "Can't deduce with depth > 0");
+
+ unsigned Quals = Arg.getCVRQualifiers() & ~Param.getCVRQualifiers();
+ QualType DeducedType = Arg.getQualifiedType(Quals);
+ unsigned Index = TemplateTypeParm->getIndex();
+
+ if (Deduced[Index].isNull())
+ Deduced[Index] = TemplateArgument(SourceLocation(), DeducedType);
+ else {
+ // C++ [temp.deduct.type]p2:
+ // [...] If type deduction cannot be done for any P/A pair, or if for
+ // any pair the deduction leads to more than one possible set of
+ // deduced values, or if different pairs yield different deduced
+ // values, or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ if (Deduced[Index].getAsType() != DeducedType)
+ return false;
+ }
+ return true;
+ }
+
+ if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
+ return false;
+
+ switch (Param->getTypeClass()) {
+ // No deduction possible for these types
+ case Type::Builtin:
+ return false;
+
+
+ // T *
+ case Type::Pointer: {
+ const PointerType *PointerArg = Arg->getAsPointerType();
+ if (!PointerArg)
+ return false;
+
+ return DeduceTemplateArguments(Context,
+ cast<PointerType>(Param)->getPointeeType(),
+ PointerArg->getPointeeType(),
+ Deduced);
+ }
+
+ // T &
+ case Type::LValueReference: {
+ const LValueReferenceType *ReferenceArg = Arg->getAsLValueReferenceType();
+ if (!ReferenceArg)
+ return false;
+
+ return DeduceTemplateArguments(Context,
+ cast<LValueReferenceType>(Param)->getPointeeType(),
+ ReferenceArg->getPointeeType(),
+ Deduced);
+ }
+
+ // T && [C++0x]
+ case Type::RValueReference: {
+ const RValueReferenceType *ReferenceArg = Arg->getAsRValueReferenceType();
+ if (!ReferenceArg)
+ return false;
+
+ return DeduceTemplateArguments(Context,
+ cast<RValueReferenceType>(Param)->getPointeeType(),
+ ReferenceArg->getPointeeType(),
+ Deduced);
+ }
+
+ // T [] (implied, but not stated explicitly)
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *IncompleteArrayArg =
+ Context.getAsIncompleteArrayType(Arg);
+ if (!IncompleteArrayArg)
+ return false;
+
+ return DeduceTemplateArguments(Context,
+ Context.getAsIncompleteArrayType(Param)->getElementType(),
+ IncompleteArrayArg->getElementType(),
+ Deduced);
+ }
+
+ // T [integer-constant]
+ case Type::ConstantArray: {
+ const ConstantArrayType *ConstantArrayArg =
+ Context.getAsConstantArrayType(Arg);
+ if (!ConstantArrayArg)
+ return false;
+
+ const ConstantArrayType *ConstantArrayParm =
+ Context.getAsConstantArrayType(Param);
+ if (ConstantArrayArg->getSize() != ConstantArrayParm->getSize())
+ return false;
+
+ return DeduceTemplateArguments(Context,
+ ConstantArrayParm->getElementType(),
+ ConstantArrayArg->getElementType(),
+ Deduced);
+ }
+
+ // type [i]
+ case Type::DependentSizedArray: {
+ const ArrayType *ArrayArg = dyn_cast<ArrayType>(Arg);
+ if (!ArrayArg)
+ return false;
+
+ // Check the element type of the arrays
+ const DependentSizedArrayType *DependentArrayParm
+ = cast<DependentSizedArrayType>(Param);
+ if (!DeduceTemplateArguments(Context,
+ DependentArrayParm->getElementType(),
+ ArrayArg->getElementType(),
+ Deduced))
+ return false;
+
+ // Determine whether the array bound is something we can deduce.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(DependentArrayParm->getSizeExpr());
+ if (!NTTP)
+ return true;
+
+ // We can perform template argument deduction for the given non-type
+ // template parameter.
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument at depth > 0");
+ if (const ConstantArrayType *ConstantArrayArg
+ = dyn_cast<ConstantArrayType>(ArrayArg))
+ return DeduceNonTypeTemplateArgument(Context, NTTP,
+ ConstantArrayArg->getSize(),
+ Deduced);
+ if (const DependentSizedArrayType *DependentArrayArg
+ = dyn_cast<DependentSizedArrayType>(ArrayArg))
+ return DeduceNonTypeTemplateArgument(Context, NTTP,
+ DependentArrayArg->getSizeExpr(),
+ Deduced);
+
+ // Incomplete type does not match a dependently-sized array type
+ return false;
+ }
+
+ default:
+ break;
+ }
+
+ // FIXME: Many more cases to go.
+ return false;
+}
+
+static bool
+DeduceTemplateArguments(ASTContext &Context, const TemplateArgument &Param,
+ const TemplateArgument &Arg,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
+ switch (Param.getKind()) {
+ case TemplateArgument::Null:
+ assert(false && "Null template argument in parameter list");
+ break;
+
+ case TemplateArgument::Type:
+ assert(Arg.getKind() == TemplateArgument::Type && "Type/value mismatch");
+ return DeduceTemplateArguments(Context, Param.getAsType(),
+ Arg.getAsType(), Deduced);
+
+ case TemplateArgument::Declaration:
+ // FIXME: Implement this check
+ assert(false && "Unimplemented template argument deduction case");
+ return false;
+
+ case TemplateArgument::Integral:
+ if (Arg.getKind() == TemplateArgument::Integral) {
+ // FIXME: Zero extension + sign checking here?
+ return *Param.getAsIntegral() == *Arg.getAsIntegral();
+ }
+ if (Arg.getKind() == TemplateArgument::Expression)
+ return false;
+
+ assert(false && "Type/value mismatch");
+ return false;
+
+ case TemplateArgument::Expression: {
+ if (NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(Param.getAsExpr())) {
+ if (Arg.getKind() == TemplateArgument::Integral)
+ // FIXME: Sign problems here
+ return DeduceNonTypeTemplateArgument(Context, NTTP,
+ *Arg.getAsIntegral(), Deduced);
+ if (Arg.getKind() == TemplateArgument::Expression)
+ return DeduceNonTypeTemplateArgument(Context, NTTP, Arg.getAsExpr(),
+ Deduced);
+
+ assert(false && "Type/value mismatch");
+ return false;
+ }
+
+ // Can't deduce anything, but that's okay.
+ return true;
+ }
+ }
+
+ return true;
+}
+
+static bool
+DeduceTemplateArguments(ASTContext &Context,
+ const TemplateArgumentList &ParamList,
+ const TemplateArgumentList &ArgList,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
+ assert(ParamList.size() == ArgList.size());
+ for (unsigned I = 0, N = ParamList.size(); I != N; ++I) {
+ if (!DeduceTemplateArguments(Context, ParamList[I], ArgList[I], Deduced))
+ return false;
+ }
+ return true;
+}
+
+
+TemplateArgumentList *
+Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs) {
+ // Deduce the template arguments for the partial specialization
+ llvm::SmallVector<TemplateArgument, 4> Deduced;
+ Deduced.resize(Partial->getTemplateParameters()->size());
+ if (! ::DeduceTemplateArguments(Context, Partial->getTemplateArgs(),
+ TemplateArgs, Deduced))
+ return 0;
+
+ // FIXME: Substitute the deduced template arguments into the template
+ // arguments of the class template partial specialization; the resulting
+ // template arguments should match TemplateArgs exactly.
+
+ for (unsigned I = 0, N = Deduced.size(); I != N; ++I) {
+ TemplateArgument &Arg = Deduced[I];
+
+ // FIXME: If this template argument was not deduced, but the corresponding
+ // template parameter has a default argument, instantiate the default
+ // argument.
+ if (Arg.isNull()) // FIXME: Result->Destroy(Context);
+ return 0;
+
+ if (Arg.getKind() == TemplateArgument::Integral) {
+ // FIXME: Instantiate the type, but we need some context!
+ const NonTypeTemplateParmDecl *Parm
+ = cast<NonTypeTemplateParmDecl>(Partial->getTemplateParameters()
+ ->getParam(I));
+ // QualType T = InstantiateType(Parm->getType(), *Result,
+ // Parm->getLocation(), Parm->getDeclName());
+ // if (T.isNull()) // FIXME: Result->Destroy(Context);
+ // return 0;
+ QualType T = Parm->getType();
+
+ // FIXME: Make sure we didn't overflow our data type!
+ llvm::APSInt &Value = *Arg.getAsIntegral();
+ unsigned AllowedBits = Context.getTypeSize(T);
+ if (Value.getBitWidth() != AllowedBits)
+ Value.extOrTrunc(AllowedBits);
+ Value.setIsSigned(T->isSignedIntegerType());
+ Arg.setIntegralType(T);
+ }
+ }
+
+ // FIXME: This is terrible. DeduceTemplateArguments should use a
+ // TemplateArgumentListBuilder directly.
+ TemplateArgumentListBuilder Builder(Context);
+ for (unsigned I = 0, N = Deduced.size(); I != N; ++I)
+ Builder.push_back(Deduced[I]);
+
+ return new (Context) TemplateArgumentList(Context, Builder, /*CopyArgs=*/true,
+ /*FlattenArgs=*/true);
+}
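
The Type cases above implement the [temp.deduct.type]p9 forms listed in the comment (T, cv-list T, T*, T&, T&&, T[], T[N], T[i]). A small illustration of the partial-specialization matching this file enables; the templates are hypothetical:

    template<typename A> struct Traits;                    // primary template

    template<typename U> struct Traits<U*> {               // Type::Pointer case
      typedef U pointee;                                   // U deduced from the pointee
    };

    template<typename T, unsigned N> struct Traits<T[N]> { // DependentSizedArray case
      static const unsigned size = N;                      // N deduced via
    };                                                     // DeduceNonTypeTemplateArgument

    // Traits<int*>::pointee is int; Traits<char[8]>::size is 8.
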
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index d3d771b..562749e 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -503,6 +503,10 @@ InstantiateTemplateSpecializationType(
for (TemplateSpecializationType::iterator Arg = T->begin(), ArgEnd = T->end();
Arg != ArgEnd; ++Arg) {
switch (Arg->getKind()) {
+ case TemplateArgument::Null:
+ assert(false && "Should never have a NULL template argument");
+ break;
+
case TemplateArgument::Type: {
QualType T = SemaRef.InstantiateType(Arg->getAsType(),
TemplateArgs,
@@ -829,21 +833,23 @@ Sema::InstantiateClassTemplateSpecialization(
// Determine whether any class template partial specializations
// match the given template arguments.
- llvm::SmallVector<ClassTemplatePartialSpecializationDecl *, 4> Matched;
+ typedef std::pair<ClassTemplatePartialSpecializationDecl *,
+ TemplateArgumentList *> MatchResult;
+ llvm::SmallVector<MatchResult, 4> Matched;
for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
Partial = Template->getPartialSpecializations().begin(),
PartialEnd = Template->getPartialSpecializations().end();
Partial != PartialEnd;
++Partial) {
- if (DeduceTemplateArguments(&*Partial, ClassTemplateSpec->getTemplateArgs()))
- Matched.push_back(&*Partial);
+ if (TemplateArgumentList *Deduced
+ = DeduceTemplateArguments(&*Partial,
+ ClassTemplateSpec->getTemplateArgs()))
+ Matched.push_back(std::make_pair(&*Partial, Deduced));
}
if (Matched.size() == 1) {
- Pattern = Matched[0];
- // FIXME: set TemplateArgs to the template arguments of the
- // partial specialization, instantiated with the deduced template
- // arguments.
+ Pattern = Matched[0].first;
+ TemplateArgs = Matched[0].second;
} else if (Matched.size() > 1) {
// FIXME: Implement partial ordering of class template partial
// specializations.
@@ -856,9 +862,17 @@ Sema::InstantiateClassTemplateSpecialization(
ExplicitInstantiation? TSK_ExplicitInstantiation
: TSK_ImplicitInstantiation);
- return InstantiateClass(ClassTemplateSpec->getLocation(),
- ClassTemplateSpec, Pattern, *TemplateArgs,
- ExplicitInstantiation);
+ bool Result = InstantiateClass(ClassTemplateSpec->getLocation(),
+ ClassTemplateSpec, Pattern, *TemplateArgs,
+ ExplicitInstantiation);
+
+ for (unsigned I = 0, N = Matched.size(); I != N; ++I) {
+ // FIXME: Implement TemplateArgumentList::Destroy!
+ // if (Matched[I].first != Pattern)
+ // Matched[I].second->Destroy(Context);
+ }
+
+ return Result;
}
/// \brief Instantiate the definitions of all of the member of the
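
The net effect of the new MatchResult pairing: when exactly one partial specialization matches, instantiation proceeds with that specialization's deduced argument list instead of the written argument list of the explicit specialization. Sketch (hypothetical templates):

    template<typename T> struct S { };
    template<typename U> struct S<U*> { typedef U pointee; };

    // Instantiating S<int*> selects the U* partial specialization with the
    // deduced list { U = int }, so S<int*>::pointee is int.
    S<int*>::pointee p = 0;
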
diff --git a/lib/Sema/SemaTemplateInstantiateExpr.cpp b/lib/Sema/SemaTemplateInstantiateExpr.cpp
index a6b9703..5ba42f2 100644
--- a/lib/Sema/SemaTemplateInstantiateExpr.cpp
+++ b/lib/Sema/SemaTemplateInstantiateExpr.cpp
@@ -119,12 +119,13 @@ TemplateExprInstantiator::VisitDeclRefExpr(DeclRefExpr *E) {
T->isWideCharType(),
T,
E->getSourceRange().getBegin()));
- else if (T->isBooleanType())
+ if (T->isBooleanType())
return SemaRef.Owned(new (SemaRef.Context) CXXBoolLiteralExpr(
Arg.getAsIntegral()->getBoolValue(),
T,
E->getSourceRange().getBegin()));
+ assert(Arg.getAsIntegral()->getBitWidth() == SemaRef.Context.getIntWidth(T));
return SemaRef.Owned(new (SemaRef.Context) IntegerLiteral(
*Arg.getAsIntegral(),
T,
diff --git a/lib/Sema/SemaTemplateInstantiateStmt.cpp b/lib/Sema/SemaTemplateInstantiateStmt.cpp
index 1f69479..fd349df 100644
--- a/lib/Sema/SemaTemplateInstantiateStmt.cpp
+++ b/lib/Sema/SemaTemplateInstantiateStmt.cpp
@@ -194,6 +194,8 @@ Sema::OwningStmtResult TemplateStmtInstantiator::VisitIfStmt(IfStmt *S) {
if (Cond.isInvalid())
return SemaRef.StmtError();
+ Sema::FullExprArg FullCond(FullExpr(Cond));
+
// Instantiate the "then" branch.
OwningStmtResult Then = SemaRef.InstantiateStmt(S->getThen(), TemplateArgs);
if (Then.isInvalid())
@@ -204,7 +206,7 @@ Sema::OwningStmtResult TemplateStmtInstantiator::VisitIfStmt(IfStmt *S) {
if (Else.isInvalid())
return SemaRef.StmtError();
- return SemaRef.ActOnIfStmt(S->getIfLoc(), FullExpr(Cond), move(Then),
+ return SemaRef.ActOnIfStmt(S->getIfLoc(), FullCond, move(Then),
S->getElseLoc(), move(Else));
}
@@ -236,12 +238,14 @@ Sema::OwningStmtResult TemplateStmtInstantiator::VisitWhileStmt(WhileStmt *S) {
if (Cond.isInvalid())
return SemaRef.StmtError();
+ Sema::FullExprArg FullCond(FullExpr(Cond));
+
// Instantiate the body
OwningStmtResult Body = SemaRef.InstantiateStmt(S->getBody(), TemplateArgs);
if (Body.isInvalid())
return SemaRef.StmtError();
- return SemaRef.ActOnWhileStmt(S->getWhileLoc(), FullExpr(Cond), move(Body));
+ return SemaRef.ActOnWhileStmt(S->getWhileLoc(), FullCond, move(Body));
}
Sema::OwningStmtResult TemplateStmtInstantiator::VisitDoStmt(DoStmt *S) {