Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp  621
1 files changed, 444 insertions, 177 deletions
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 6ae2d0c..4455f1a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -17,6 +17,7 @@
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -66,29 +67,39 @@ static CanQualType GetReturnType(QualType RetTy) {
return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
+/// Arrange the argument and result information for a value of the
+/// given unprototyped function type.
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
- return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
- SmallVector<CanQualType, 16>(),
- FTNP->getExtInfo());
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
+ // When translating an unprototyped function type, always use a
+ // variadic type.
+ return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
+ ArrayRef<CanQualType>(),
+ FTNP->getExtInfo(),
+ RequiredArgs(0));
}
-/// \param Args - contains any initial parameters besides those
-/// in the formal type
-static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &ArgTys,
+/// Arrange the argument and result information for a value of the
+/// given function type, on top of any implicit parameters already
+/// stored.
+static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &argTypes,
CanQual<FunctionProtoType> FTP) {
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
- CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
- return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ argTypes.push_back(FTP->getArgType(i));
+ CanQualType resultType = FTP->getResultType().getUnqualifiedType();
+ return CGT.arrangeFunctionType(resultType, argTypes,
+ FTP->getExtInfo(), required);
}
+/// Arrange the argument and result information for a value of the
+/// given function type.
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
- SmallVector<CanQualType, 16> ArgTys;
- return ::getFunctionInfo(*this, ArgTys, FTP);
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
+ SmallVector<CanQualType, 16> argTypes;
+ return ::arrangeFunctionType(*this, argTypes, FTP);
}
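
The RequiredArgs value threaded through these overloads records how many leading arguments of a call are fixed: RequiredArgs::All for a fully prototyped call, RequiredArgs(0) for the unprototyped case above where every argument is effectively variadic, and forPrototypePlus to account for implicit prefix parameters. A minimal sketch of the concept, assuming a simplified stand-in rather than clang's actual class:

    // A simplified stand-in for the idea behind RequiredArgs (an
    // assumption about its semantics, not clang's implementation):
    // either every argument of a call is required, or only the first
    // N are and the rest are passed with variadic conventions.
    class RequiredArgsSketch {
      unsigned NumRequired; // ~0U encodes "all arguments required"
    public:
      enum All_t { All };
      RequiredArgsSketch(All_t) : NumRequired(~0U) {}
      explicit RequiredArgsSketch(unsigned n) : NumRequired(n) {}

      // For a prototype of numProtoArgs parameters preceded by numPrefix
      // implicit arguments (such as 'this'): everything is required
      // unless the prototype is variadic.
      static RequiredArgsSketch forPrototypePlus(bool isVariadic,
                                                 unsigned numProtoArgs,
                                                 unsigned numPrefix) {
        return isVariadic ? RequiredArgsSketch(numPrefix + numProtoArgs)
                          : RequiredArgsSketch(All);
      }

      bool allowsOptionalArgs() const { return NumRequired != ~0U; }
    };
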
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -111,162 +122,250 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
return CC_C;
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
- const FunctionProtoType *FTP) {
- SmallVector<CanQualType, 16> ArgTys;
+/// Arrange the argument and result information for a call to an
+/// unknown C++ non-static member function of the given abstract type.
+/// The member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP) {
+ SmallVector<CanQualType, 16> argTypes;
// Add the 'this' pointer.
- ArgTys.push_back(GetThisType(Context, RD));
+ argTypes.push_back(GetThisType(Context, RD));
- return ::getFunctionInfo(*this, ArgTys,
+ return ::arrangeFunctionType(*this, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
- SmallVector<CanQualType, 16> ArgTys;
-
+/// Arrange the argument and result information for a declaration or
+/// definition of the given C++ non-static member function. The
+/// member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
assert(!isa<CXXConstructorDecl>(MD) && "wrong method for contructors!");
assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
- // Add the 'this' pointer unless this is a static method.
- if (MD->isInstance())
- ArgTys.push_back(GetThisType(Context, MD->getParent()));
+ CanQual<FunctionProtoType> prototype = GetFormalType(MD);
+
+ if (MD->isInstance()) {
+ // The abstract case is perfectly fine.
+ return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
+ }
- return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
+ return arrangeFunctionType(prototype);
}
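
Both member-function arrangements above prepend the implicit 'this' pointer before the declared parameters; at the ABI level a non-static member function is handled as if it were a free function taking the object pointer first. A conceptual illustration (not code clang emits):

    struct Widget {
      int scale(int x); // arranged roughly as: int scale(Widget *self, int x)
    };
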
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
- CXXCtorType Type) {
- SmallVector<CanQualType, 16> ArgTys;
- ArgTys.push_back(GetThisType(Context, D->getParent()));
- CanQualType ResTy = Context.VoidTy;
+/// Arrange the argument and result information for a declaration
+/// or definition to the given constructor variant.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
+ CXXCtorType ctorKind) {
+ SmallVector<CanQualType, 16> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
- TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
+ TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+
// Add the formal parameters.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
+ argTypes.push_back(FTP->getArgType(i));
- return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
- CXXDtorType Type) {
- SmallVector<CanQualType, 2> ArgTys;
- ArgTys.push_back(GetThisType(Context, D->getParent()));
- CanQualType ResTy = Context.VoidTy;
+/// Arrange the argument and result information for a declaration,
+/// definition, or call to the given destructor variant. It so
+/// happens that all three cases produce the same information.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType dtorKind) {
+ SmallVector<CanQualType, 2> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
- TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
+ TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
CanQual<FunctionProtoType> FTP = GetFormalType(D);
assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
- return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
+ RequiredArgs::All);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
+/// Arrange the argument and result information for the declaration or
+/// definition of the given function.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isInstance())
- return getFunctionInfo(MD);
+ return arrangeCXXMethodDeclaration(MD);
CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+
assert(isa<FunctionType>(FTy));
- if (isa<FunctionNoProtoType>(FTy))
- return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
+
+ // When declaring a function without a prototype, always use a
+ // non-variadic type.
+ if (isa<FunctionNoProtoType>(FTy)) {
+ CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
+ return arrangeFunctionType(noProto->getResultType(),
+ ArrayRef<CanQualType>(),
+ noProto->getExtInfo(),
+ RequiredArgs::All);
+ }
+
assert(isa<FunctionProtoType>(FTy));
- return getFunctionInfo(FTy.getAs<FunctionProtoType>());
+ return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of an Objective-C method.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
+ // It happens that this is the same as a call with no optional
+ // arguments, except also using the formal 'self' type.
+ return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
- SmallVector<CanQualType, 16> ArgTys;
- ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
- ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+/// Arrange the argument and result information for the function type
+/// through which to perform a send to the given Objective-C method,
+/// using the given receiver type. The receiver type is not always
+/// the 'self' type of the method or even an Objective-C pointer type.
+/// This is *not* the right method for actually performing such a
+/// message send, due to the possibility of optional arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType) {
+ SmallVector<CanQualType, 16> argTys;
+ argTys.push_back(Context.getCanonicalParamType(receiverType));
+ argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
// FIXME: Kill copy?
for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
e = MD->param_end(); i != e; ++i) {
- ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+ argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
}
FunctionType::ExtInfo einfo;
einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
- if (getContext().getLangOptions().ObjCAutoRefCount &&
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
MD->hasAttr<NSReturnsRetainedAttr>())
einfo = einfo.withProducesResult(true);
- return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
+ RequiredArgs required =
+ (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
+
+ return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
+ einfo, required);
}
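
The Objective-C arrangement is analogous, but with two implicit leading parameters: the receiver and the selector. A rough sketch of the lowered signature for a method -(int)add:(int)x, with hypothetical stand-in typedefs for 'id' and 'SEL':

    typedef void *id_t;  // hypothetical stand-in for Objective-C 'id'
    typedef void *SEL_t; // hypothetical stand-in for Objective-C 'SEL'
    int add_impl(id_t self, SEL_t _cmd, int x); // receiver, selector, then x
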
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
// FIXME: Do we need to handle ObjCMethodDecl?
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
- return getFunctionInfo(CD, GD.getCtorType());
+ return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
- return getFunctionInfo(DD, GD.getDtorType());
+ return arrangeCXXDestructor(DD, GD.getDtorType());
+
+ return arrangeFunctionDeclaration(FD);
+}
- return getFunctionInfo(FD);
+/// Figure out the rules for calling a function with the given formal
+/// type using the given arguments. The arguments are necessary
+/// because the function might be unprototyped, in which case it's
+/// target-dependent in crazy ways.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
+ const FunctionType *fnType) {
+ RequiredArgs required = RequiredArgs::All;
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
+ if (proto->isVariadic())
+ required = RequiredArgs(proto->getNumArgs());
+ } else if (CGM.getTargetCodeGenInfo()
+ .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
+ required = RequiredArgs(0);
+ }
+
+ return arrangeFunctionCall(fnType->getResultType(), args,
+ fnType->getExtInfo(), required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const CallArgList &Args,
- const FunctionType::ExtInfo &Info) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(QualType resultType,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
// FIXME: Kill copy.
- SmallVector<CanQualType, 16> ArgTys;
- for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ SmallVector<CanQualType, 16> argTypes;
+ for (CallArgList::const_iterator i = args.begin(), e = args.end();
i != e; ++i)
- ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
- return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+ argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const FunctionArgList &Args,
- const FunctionType::ExtInfo &Info) {
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
+ const FunctionArgList &args,
+ const FunctionType::ExtInfo &info,
+ bool isVariadic) {
// FIXME: Kill copy.
- SmallVector<CanQualType, 16> ArgTys;
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ SmallVector<CanQualType, 16> argTypes;
+ for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
i != e; ++i)
- ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
- return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
+ argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
+
+ RequiredArgs required =
+ (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
}
-const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
- SmallVector<CanQualType, 1> args;
- return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
+const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
+ return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
+ FunctionType::ExtInfo(), RequiredArgs::All);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
- const SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info) {
+/// Arrange the argument and result information for an abstract value
+/// of a given function type. This is the method which all of the
+/// above functions ultimately defer to.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
#ifndef NDEBUG
- for (SmallVectorImpl<CanQualType>::const_iterator
- I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
+ for (ArrayRef<CanQualType>::const_iterator
+ I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
assert(I->isCanonicalAsParam());
#endif
- unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
+ unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());
+ CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
- void *InsertPos = 0;
- CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
+ void *insertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
if (FI)
return *FI;
- // Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
- Info.getHasRegParm(), Info.getRegParm(), ResTy,
- ArgTys.data(), ArgTys.size());
- FunctionInfos.InsertNode(FI, InsertPos);
+ // Construct the function info. We co-allocate the ArgInfos.
+ FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
+ FunctionInfos.InsertNode(FI, insertPos);
- bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
- assert(Inserted && "Recursively being processed?");
+ bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
+ assert(inserted && "Recursively being processed?");
// Compute ABI information.
getABIInfo().computeInfo(*FI);
@@ -274,39 +373,42 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// Loop over all of the computed argument and return value info. If any of
// them are direct or extend without a specified coerce type, specify the
// default now.
- ABIArgInfo &RetInfo = FI->getReturnInfo();
- if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
- RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));
+ ABIArgInfo &retInfo = FI->getReturnInfo();
+ if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
+ retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
I != E; ++I)
if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
I->info.setCoerceToType(ConvertType(I->type));
- bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
- assert(Erased && "Not in set?");
+ bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
+ assert(erased && "Not in set?");
return *FI;
}
-CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn, bool returnsRetained,
- bool _HasRegParm, unsigned _RegParm,
- CanQualType ResTy,
- const CanQualType *ArgTys,
- unsigned NumArgTys)
- : CallingConvention(_CallingConvention),
- EffectiveCallingConvention(_CallingConvention),
- NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
- HasRegParm(_HasRegParm), RegParm(_RegParm)
-{
- NumArgs = NumArgTys;
-
- // FIXME: Coallocate with the CGFunctionInfo object.
- Args = new ArgInfo[1 + NumArgTys];
- Args[0].type = ResTy;
- for (unsigned i = 0; i != NumArgTys; ++i)
- Args[1 + i].type = ArgTys[i];
+CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
+ const FunctionType::ExtInfo &info,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required) {
+ void *buffer = operator new(sizeof(CGFunctionInfo) +
+ sizeof(ArgInfo) * (argTypes.size() + 1));
+ CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
+ FI->CallingConvention = llvmCC;
+ FI->EffectiveCallingConvention = llvmCC;
+ FI->ASTCallingConvention = info.getCC();
+ FI->NoReturn = info.getNoReturn();
+ FI->ReturnsRetained = info.getProducesResult();
+ FI->Required = required;
+ FI->HasRegParm = info.getHasRegParm();
+ FI->RegParm = info.getRegParm();
+ FI->NumArgs = argTypes.size();
+ FI->getArgsBuffer()[0].type = resultType;
+ for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
+ FI->getArgsBuffer()[i + 1].type = argTypes[i];
+ return FI;
}
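
The create function above replaces the old separate 'new ArgInfo[1 + NumArgTys]' allocation with a single buffer holding the CGFunctionInfo followed by its ArgInfo array. A minimal sketch of that co-allocation pattern, using hypothetical Header and Elem types:

    #include <new>     // operator new, placement new
    #include <cstddef>

    struct Elem { int type; };

    struct Header {
      unsigned NumElems;
      // The trailing array starts right after the header; this assumes
      // Elem's alignment does not exceed Header's.
      Elem *getBuffer() { return reinterpret_cast<Elem *>(this + 1); }

      static Header *create(unsigned n) {
        void *buffer = operator new(sizeof(Header) + sizeof(Elem) * n);
        Header *h = new (buffer) Header();
        h->NumElems = n;
        return h;
      }
    };
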
/***/
@@ -366,7 +468,7 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
QualType EltTy = CT->getElementType();
llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
- llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 0, "imag");
+ llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 1, "imag");
EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
} else {
EmitStoreThroughLValue(RValue::get(AI), LV);
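
The one-character change above fixes a real bug: both the real and imaginary stores used struct index 0, so the imaginary part was written over the real part while the imaginary slot stayed uninitialized. In the IR struct backing a _Complex value the real part is element 0 and the imaginary part element 1; a plain C++ analogue of the corrected layout:

    struct ComplexSlot { double real; double imag; };
    double *realAddr(ComplexSlot *p) { return &p->real; } // GEP index 0
    double *imagAddr(ComplexSlot *p) { return &p->imag; } // GEP index 1
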
@@ -521,7 +623,9 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
SI->setAlignment(1);
}
} else {
- CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ if (LowAlignment)
+ SI->setAlignment(1);
}
}
@@ -611,20 +715,24 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
return false;
}
-llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
- const CGFunctionInfo &FI = getFunctionInfo(GD);
+bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
+ if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
+ if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::LongDouble)
+ return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
+ }
+ }
- // For definition purposes, don't consider a K&R function variadic.
- bool Variadic = false;
- if (const FunctionProtoType *FPT =
- cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
- Variadic = FPT->isVariadic();
+ return false;
+}
- return GetFunctionType(FI, Variadic);
+llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
+ return GetFunctionType(FI);
}
llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
assert(Inserted && "Recursively being processed?");
@@ -675,6 +783,9 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Insert a padding type to ensure proper alignment.
+ if (llvm::Type *PaddingType = argAI.getPaddingType())
+ argTypes.push_back(PaddingType);
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
@@ -697,7 +808,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
assert(Erased && "Not in set?");
- return llvm::FunctionType::get(resultType, argTypes, isVariadic);
+ return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
@@ -709,18 +820,18 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
- Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
else
- Info = &getFunctionInfo(MD);
- return GetFunctionType(*Info, FPT->isVariadic());
+ Info = &arrangeCXXMethodDeclaration(MD);
+ return GetFunctionType(*Info);
}
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
AttributeListType &PAL,
unsigned &CallingConv) {
- unsigned FuncAttrs = 0;
- unsigned RetAttrs = 0;
+ llvm::Attributes FuncAttrs;
+ llvm::Attributes RetAttrs;
CallingConv = FI.getEffectiveCallingConvention();
@@ -806,7 +917,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
const ABIArgInfo &AI = it->info;
- unsigned Attributes = 0;
+ llvm::Attributes Attrs;
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
@@ -814,20 +925,24 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (AI.getKind()) {
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerOrEnumerationType())
- Attributes |= llvm::Attribute::SExt;
+ Attrs |= llvm::Attribute::SExt;
else if (ParamType->isUnsignedIntegerOrEnumerationType())
- Attributes |= llvm::Attribute::ZExt;
+ Attrs |= llvm::Attribute::ZExt;
// FALL THROUGH
case ABIArgInfo::Direct:
if (RegParm > 0 &&
- (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ (ParamType->isIntegerType() || ParamType->isPointerType() ||
+ ParamType->isReferenceType())) {
RegParm -=
(Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
if (RegParm >= 0)
- Attributes |= llvm::Attribute::InReg;
+ Attrs |= llvm::Attribute::InReg;
}
// FIXME: handle sseregparm someday...
+ // Increment Index if there is padding.
+ Index += (AI.getPaddingType() != 0);
+
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType()))
Index += STy->getNumElements()-1; // 1 will be added below.
@@ -835,9 +950,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
if (AI.getIndirectByVal())
- Attributes |= llvm::Attribute::ByVal;
+ Attrs |= llvm::Attribute::ByVal;
- Attributes |=
+ Attrs |=
llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
// byval disables readnone and readonly.
FuncAttrs &= ~(llvm::Attribute::ReadOnly |
@@ -859,8 +974,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
}
}
- if (Attributes)
- PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
+ if (Attrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
++Index;
}
if (FuncAttrs)
@@ -970,6 +1085,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Skip the dummy padding argument.
+ if (ArgI.getPaddingType())
+ ++AI;
+
// If we have the trivial case, handle it with no muss and fuss.
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
ArgI.getCoerceToType() == ConvertType(Ty) &&
@@ -991,7 +1110,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
break;
}
- llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
// The alignment we need to use is the max of the requested alignment for
// the argument plus the alignment required by our access code below.
@@ -1015,15 +1134,36 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
- if (llvm::StructType *STy =
- dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
- Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ if (STy && STy->getNumElements() > 1) {
+ uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
+ llvm::Type *DstTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ if (SrcSize <= DstSize) {
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+ } else {
+ llvm::AllocaInst *TempAlloca =
+ CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
+ TempAlloca->setAlignment(AlignmentToUse);
+ llvm::Value *TempV = TempAlloca;
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
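
The rewritten prolog code above adds a size guard before flattening a first-class-aggregate coercion: elements are stored directly into the destination only when the coerced struct fits; otherwise they are staged in an aligned temporary and exactly DstSize bytes are copied out, so the stores can never run past the end of the alloca. A minimal analogue of the guard (illustrative, not clang code):

    #include <algorithm>
    #include <cstddef>
    #include <cstring>

    // Copy a coerced value of srcSize bytes into a destination slot of
    // dstSize bytes without overrunning the destination.
    void coercedCopy(void *dst, const void *src,
                     std::size_t srcSize, std::size_t dstSize) {
      std::memcpy(dst, src, std::min(srcSize, dstSize));
    }
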
@@ -1047,10 +1187,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If this structure was expanded into multiple arguments then
// we need to create a temporary and reconstruct it from the
// arguments.
- llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
- llvm::Function::arg_iterator End =
- ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
- EmitParmDecl(*Arg, Temp, ArgNo);
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
+ CharUnits Align = getContext().getDeclAlign(Arg);
+ Alloca->setAlignment(Align.getQuantity());
+ LValue LV = MakeAddrLValue(Alloca, Ty, Align);
+ llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
+ EmitParmDecl(*Arg, Alloca, ArgNo);
// Name the arguments used in expansion and increment AI.
unsigned Index = 0;
@@ -1076,6 +1218,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(AI == Fn->arg_end() && "Argument mismatch!");
}
+static void eraseUnusedBitCasts(llvm::Instruction *insn) {
+ while (insn->use_empty()) {
+ llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
+ if (!bitcast) return;
+
+ // This is "safe" because we would have used a ConstantExpr otherwise.
+ insn = cast<llvm::Instruction>(bitcast->getOperand(0));
+ bitcast->eraseFromParent();
+ }
+}
+
/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
llvm::Value *result) {
@@ -1154,9 +1307,54 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(result, resultType);
}
+/// If this is a +1 of the value of an immutable 'self', remove it.
+static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // This is only applicable to a method with an immutable 'self'.
+ const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+ if (!method) return 0;
+ const VarDecl *self = method->getSelfDecl();
+ if (!self->getType().isConstQualified()) return 0;
+
+ // Look for a retain call.
+ llvm::CallInst *retainCall =
+ dyn_cast<llvm::CallInst>(result->stripPointerCasts());
+ if (!retainCall ||
+ retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
+ return 0;
+
+ // Look for an ordinary load of 'self'.
+ llvm::Value *retainedValue = retainCall->getArgOperand(0);
+ llvm::LoadInst *load =
+ dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
+ if (!load || load->isAtomic() || load->isVolatile() ||
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
+ return 0;
+
+ // Okay! Burn it all down. This relies for correctness on the
+ // assumption that the retain is emitted as part of the return and
+ // that thereafter everything is used "linearly".
+ llvm::Type *resultType = result->getType();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(result));
+ assert(retainCall->use_empty());
+ retainCall->eraseFromParent();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
+
+ return CGF.Builder.CreateBitCast(load, resultType);
+}
+
/// Emit an ARC autorelease of the result of a function.
+///
+/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
llvm::Value *result) {
+ // If we're returning 'self', kill the initial retain. This is a
+ // heuristic attempt to "encourage correctness" in the really unfortunate
+ // case where we have a return of self during a dealloc and we desperately
+ // need to avoid the possible autorelease.
+ if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
+ return self;
+
// At -O0, try to emit a fused retain/autorelease.
if (CGF.shouldUseFusedARCCalls())
if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
@@ -1165,6 +1363,44 @@ static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
return CGF.EmitARCAutoreleaseReturnValue(result);
}
+/// Heuristically search for a dominating store to the return-value slot.
+static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+ // If there are multiple uses of the return-value slot, just check
+ // for something immediately preceding the IP. Sometimes this can
+ // happen with how we generate implicit-returns; it can also happen
+ // with noreturn cleanups.
+ if (!CGF.ReturnValue->hasOneUse()) {
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ if (IP->empty()) return 0;
+ llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
+ if (!store) return 0;
+ if (store->getPointerOperand() != CGF.ReturnValue) return 0;
+ assert(!store->isAtomic() && !store->isVolatile()); // see below
+ return store;
+ }
+
+ llvm::StoreInst *store =
+ dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
+ if (!store) return 0;
+
+ // These aren't actually possible for non-coerced returns, and we
+ // only care about non-coerced returns on this code path.
+ assert(!store->isAtomic() && !store->isVolatile());
+
+ // Now do a first-and-dirty dominance check: just walk up the
+ // single-predecessors chain from the current insertion point.
+ llvm::BasicBlock *StoreBB = store->getParent();
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ while (IP != StoreBB) {
+ if (!(IP = IP->getSinglePredecessor()))
+ return 0;
+ }
+
+ // Okay, the store's basic block dominates the insertion point; we
+ // can do our thing.
+ return store;
+}
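
This helper is a deliberately cheap dominance test: it follows only unique predecessors, so it can miss stores that dominate the insertion point through real branching, in which case EmitFunctionEpilog below simply falls back to an explicit load. An illustrative source pattern (an example, not from the patch) where the single store to the return slot is found and both the store and the load are elided:

    int answer(); // hypothetical helper

    int f() {
      int x = answer();
      return x; // the store into the return slot immediately precedes the
                // epilogue, so the epilogue reuses the stored value
    }
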
+
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Functions with no result always return void.
if (ReturnValue == 0) {
@@ -1199,16 +1435,9 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// The internal return value temp always will have pointer-to-return-type
// type, just do a load.
- // If the instruction right before the insertion point is a store to the
- // return value, we can elide the load, zap the store, and usually zap the
- // alloca.
- llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
- llvm::StoreInst *SI = 0;
- if (InsertBB->empty() ||
- !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
- SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
- RV = Builder.CreateLoad(ReturnValue);
- } else {
+ // If there is a dominating store to ReturnValue, we can elide
+ // the load, zap the store, and usually zap the alloca.
+ if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
// Get the stored value and nuke the now-dead store.
RetDbgLoc = SI->getDebugLoc();
RV = SI->getValueOperand();
@@ -1219,6 +1448,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
ReturnValue = 0;
}
+
+ // Otherwise, we have to do a simple load.
+ } else {
+ RV = Builder.CreateLoad(ReturnValue);
}
} else {
llvm::Value *V = ReturnValue;
@@ -1236,7 +1469,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// In ARC, end functions that return a retainable type with a call
// to objc_autoreleaseReturnValue.
if (AutoreleaseResult) {
- assert(getLangOptions().ObjCAutoRefCount &&
+ assert(getLangOpts().ObjCAutoRefCount &&
!FI.isReturnsRetained() &&
RetTy->isObjCRetainableType());
RV = emitAutoreleaseOfResult(*this, RV);
@@ -1431,7 +1664,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
if (const ObjCIndirectCopyRestoreExpr *CRE
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
- assert(getContext().getLangOptions().ObjCAutoRefCount);
+ assert(getContext().getLangOpts().ObjCAutoRefCount);
assert(getContext().hasSameType(E->getType(), type));
return emitWritebackArg(*this, args, CRE);
}
@@ -1450,14 +1683,23 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
- args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
- type, /*NeedsCopy*/true);
+ args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
return;
}
args.add(EmitAnyExprToTemp(E), type);
}
+// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+// optimizer it can aggressively ignore unwind edges.
+void
+CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
+ !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
+ Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
+ CGM.getNoObjCARCExceptionsMetadata());
+}
+
/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
@@ -1465,14 +1707,22 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
+
+ llvm::Instruction *Inst;
if (!InvokeDest)
- return Builder.CreateCall(Callee, Args, Name);
+ Inst = Builder.CreateCall(Callee, Args, Name);
+ else {
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
+ EmitBlock(ContBB);
+ }
- llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
- llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
- Args, Name);
- EmitBlock(ContBB);
- return Invoke;
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(Inst);
+
+ return Inst;
}
llvm::CallSite
@@ -1501,8 +1751,11 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
LValue LV = MakeAddrLValue(EltAddr, EltTy);
RValue EltRV;
- if (CodeGenFunction::hasAggregateLLVMType(EltTy))
- EltRV = RValue::getAggregate(LV.getAddress());
+ if (EltTy->isAnyComplexType())
+ // FIXME: Volatile?
+ EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
+ EltRV = LV.asAggregateRValue();
else
EltRV = EmitLoadOfLValue(LV);
ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
@@ -1519,13 +1772,16 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
// FIXME: What are the right qualifiers here?
LValue LV = EmitLValueForField(Addr, FD, 0);
RValue FldRV;
- if (CodeGenFunction::hasAggregateLLVMType(FT))
- FldRV = RValue::getAggregate(LV.getAddress());
+ if (FT->isAnyComplexType())
+ // FIXME: Volatile?
+ FldRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(FT))
+ FldRV = LV.asAggregateRValue();
else
FldRV = EmitLoadOfLValue(LV);
ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
}
- } else if (isa<ComplexType>(Ty)) {
+ } else if (Ty->isAnyComplexType()) {
ComplexPairTy CV = RV.getComplexVal();
Args.push_back(CV.first);
Args.push_back(CV.second);
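
When a complex value is expanded, it contributes two scalar IR arguments: the real part, then the imaginary part. A plain C++ analogue of the expansion (an illustration, not clang code):

    #include <complex>
    #include <vector>

    void expandComplexArg(std::complex<double> v,
                          std::vector<double> &args) {
      args.push_back(v.real()); // first scalar argument
      args.push_back(v.imag()); // second scalar argument
    }
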
@@ -1639,6 +1895,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
+ // Insert a padding argument to ensure proper alignment.
+ if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
+ Args.push_back(llvm::UndefValue::get(PaddingType));
+ ++IRArgNo;
+ }
+
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
@@ -1769,6 +2031,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(CS.getInstruction());
+
// If the call doesn't return, finish the basic block and clear the
// insertion point; this allows the rest of IRgen to discard
// unreachable code.