Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp | 1512
1 file changed, 918 insertions(+), 594 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 17c3354..6403fa9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -47,7 +47,10 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
- // TODO: add support for CC_X86Pascal to llvm
+ // TODO: Add support for __pascal to LLVM.
+ case CC_X86Pascal: return llvm::CallingConv::C;
+ // TODO: Add support for __vectorcall to LLVM.
+ case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
}
}
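
Note: the hunk above routes two more conventions through to LLVM. LLVM has no Pascal convention yet, so CC_X86Pascal deliberately falls back to the default C convention. A minimal sketch (not from this patch; identifiers are illustrative, and __vectorcall needs a target that supports it) of declarations exercising the new mappings:

  void __vectorcall vc(float x);           // CC_X86VectorCall -> llvm::CallingConv::X86_VectorCall
  void __attribute__((pascal)) pas(int x); // CC_X86Pascal -> llvm::CallingConv::C (fallback)
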
@@ -80,42 +83,25 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
- false, None, FTNP->getExtInfo(),
- RequiredArgs(0));
+ /*instanceMethod=*/false,
+ /*chainCall=*/false, None,
+ FTNP->getExtInfo(), RequiredArgs(0));
}
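
For context, the unprototyped case comes from K&R C declarations; arranging them as variadic keeps calls that do pass arguments well-formed at the IR level. A sketch (C source):

  int legacy();                          /* FunctionNoProtoType: no parameter info */
  int use(void) { return legacy(1, 2); } /* lowered through a variadic function type */
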
/// Arrange the LLVM function layout for a value of the given function
-/// type, on top of any implicit parameters already stored. Use the
-/// given ExtInfo instead of the ExtInfo from the function type.
-static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
- bool IsInstanceMethod,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP,
- FunctionType::ExtInfo extInfo) {
+/// type, on top of any implicit parameters already stored.
+static const CGFunctionInfo &
+arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP) {
RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
prefix.push_back(FTP->getParamType(i));
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
- return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
- extInfo, required);
-}
-
-/// Arrange the argument and result information for a free function (i.e.
-/// not a C++ or ObjC instance method) of the given type.
-static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP) {
- return arrangeLLVMFunctionInfo(CGT, false, prefix, FTP, FTP->getExtInfo());
-}
-
-/// Arrange the argument and result information for a free function (i.e.
-/// not a C++ or ObjC instance method) of the given type.
-static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP) {
- FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(CGT, true, prefix, FTP, extInfo);
+ return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
+ /*chainCall=*/false, prefix,
+ FTP->getExtInfo(), required);
}
/// Arrange the argument and result information for a value of the
@@ -123,7 +109,8 @@ static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
SmallVector<CanQualType, 16> argTypes;
- return ::arrangeFreeFunctionType(*this, argTypes, FTP);
+ return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
+ FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
@@ -137,6 +124,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
if (D->hasAttr<ThisCallAttr>())
return CC_X86ThisCall;
+ if (D->hasAttr<VectorCallAttr>())
+ return CC_X86VectorCall;
+
if (D->hasAttr<PascalAttr>())
return CC_X86Pascal;
@@ -158,23 +148,6 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
return CC_C;
}
-static bool isAAPCSVFP(const CGFunctionInfo &FI, const TargetInfo &Target) {
- switch (FI.getEffectiveCallingConvention()) {
- case llvm::CallingConv::C:
- switch (Target.getTriple().getEnvironment()) {
- case llvm::Triple::EABIHF:
- case llvm::Triple::GNUEABIHF:
- return true;
- default:
- return false;
- }
- case llvm::CallingConv::ARM_AAPCS_VFP:
- return true;
- default:
- return false;
- }
-}
-
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
@@ -192,8 +165,9 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
else
argTypes.push_back(Context.VoidPtrTy);
- return ::arrangeCXXMethodType(*this, argTypes,
- FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+ return ::arrangeLLVMFunctionInfo(
+ *this, true, argTypes,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
/// Arrange the argument and result information for a declaration or
@@ -216,31 +190,41 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
return arrangeFreeFunctionType(prototype);
}
-/// Arrange the argument and result information for a declaration
-/// or definition to the given constructor variant.
const CGFunctionInfo &
-CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
- CXXCtorType ctorKind) {
+CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
+ StructorType Type) {
+
SmallVector<CanQualType, 16> argTypes;
- argTypes.push_back(GetThisType(Context, D->getParent()));
+ argTypes.push_back(GetThisType(Context, MD->getParent()));
- GlobalDecl GD(D, ctorKind);
- CanQualType resultType =
- TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
+ GlobalDecl GD;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ } else {
+ auto *DD = dyn_cast<CXXDestructorDecl>(MD);
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
+ }
- CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ CanQual<FunctionProtoType> FTP = GetFormalType(MD);
// Add the formal parameters.
for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
argTypes.push_back(FTP->getParamType(i));
- TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
+ TheCXXABI.buildStructorSignature(MD, Type, argTypes);
RequiredArgs required =
- (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
+ (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
+ CanQualType resultType = TheCXXABI.HasThisReturn(GD)
+ ? argTypes.front()
+ : TheCXXABI.hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
+ return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
+ /*chainCall=*/false, argTypes, extInfo,
+ required);
}
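
The nested conditional above is dense; written out as if/else it reads (equivalent sketch, with the ABI cases annotated as examples):

  CanQualType resultType;
  if (TheCXXABI.HasThisReturn(GD))              // e.g. MS ABI constructors return 'this'
    resultType = argTypes.front();
  else if (TheCXXABI.hasMostDerivedReturn(GD))  // e.g. MS ABI deleting dtors return void*
    resultType = CGM.getContext().VoidPtrTy;
  else
    resultType = Context.VoidTy;
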
/// Arrange a call to a C++ method, passing the given arguments.
@@ -251,42 +235,22 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
unsigned ExtraArgs) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> ArgTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end(); i != e;
- ++i)
- ArgTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ for (const auto &Arg : args)
+ ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
CanQual<FunctionProtoType> FPT = GetFormalType(D);
RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
GlobalDecl GD(D, CtorKind);
- CanQualType ResultType =
- TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy;
+ CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
+ ? ArgTypes.front()
+ : TheCXXABI.hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
FunctionType::ExtInfo Info = FPT->getExtInfo();
- return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
-}
-
-/// Arrange the argument and result information for a declaration,
-/// definition, or call to the given destructor variant. It so
-/// happens that all three cases produce the same information.
-const CGFunctionInfo &
-CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
- CXXDtorType dtorKind) {
- SmallVector<CanQualType, 2> argTypes;
- argTypes.push_back(GetThisType(Context, D->getParent()));
-
- GlobalDecl GD(D, dtorKind);
- CanQualType resultType =
- TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
-
- TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
-
- CanQual<FunctionProtoType> FTP = GetFormalType(D);
- assert(FTP->getNumParams() == 0 && "dtor with formal parameters");
- assert(FTP->isVariadic() == 0 && "dtor with formal parameters");
-
- FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo,
- RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
+ /*chainCall=*/false, ArgTypes, Info,
+ Required);
}
/// Arrange the argument and result information for the declaration or
@@ -305,8 +269,9 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
// non-variadic type.
if (isa<FunctionNoProtoType>(FTy)) {
CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
- return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
- noProto->getExtInfo(), RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(
+ noProto->getReturnType(), /*instanceMethod=*/false,
+ /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
}
assert(isa<FunctionProtoType>(FTy));
@@ -350,8 +315,9 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
- return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
- argTys, einfo, required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTys, einfo, required);
}
const CGFunctionInfo &
@@ -360,14 +326,29 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
- return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
+ return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
- return arrangeCXXDestructor(DD, GD.getDtorType());
+ return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
return arrangeFunctionDeclaration(FD);
}
+/// Arrange a thunk that takes 'this' as the first parameter followed by
+/// varargs. Return a void pointer, regardless of the actual return type.
+/// The body of the thunk will end in a musttail call to a function of the
+/// correct type, and the caller will bitcast the function to the correct
+/// prototype.
+const CGFunctionInfo &
+CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
+ assert(MD->isVirtual() && "only virtual memptrs have thunks");
+ CanQual<FunctionProtoType> FTP = GetFormalType(MD);
+ CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
+ return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
+ /*chainCall=*/false, ArgTys,
+ FTP->getExtInfo(), RequiredArgs(1));
+}
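
For orientation, the construct this thunk supports is a virtual call through a member pointer under the Microsoft ABI; a minimal sketch:

  struct S { virtual int f(int); };
  int call(S *s, int (S::*mp)(int)) {
    return (s->*mp)(7);  // may dispatch through such a thunk under the MS ABI
  }
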
+
/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
@@ -375,7 +356,8 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
CodeGenModule &CGM,
const CallArgList &args,
const FunctionType *fnType,
- unsigned numExtraRequiredArgs) {
+ unsigned numExtraRequiredArgs,
+ bool chainCall) {
assert(args.size() >= numExtraRequiredArgs);
// In most cases, there are no optional arguments.
@@ -397,8 +379,13 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
required = RequiredArgs(args.size());
}
- return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
- fnType->getExtInfo(), required);
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (const auto &arg : args)
+ argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
+ return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
+ /*instanceMethod=*/false, chainCall,
+ argTypes, fnType->getExtInfo(), required);
}
/// Figure out the rules for calling a function with the given formal
@@ -407,8 +394,10 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
- const FunctionType *fnType) {
- return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
+ const FunctionType *fnType,
+ bool chainCall) {
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
+ chainCall ? 1 : 0, chainCall);
}
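
The new chainCall flag is for calls that pass a static chain pointer as a hidden first argument (marked 'nest'; see the attribute logic later in this patch). A hedged sketch of a source form that reaches this path, assuming Clang's __builtin_call_with_static_chain:

  int target(int x);
  int wrap(int x, void *frame) {
    return __builtin_call_with_static_chain(target(x), frame);
  }
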
/// A block function call is essentially a free-function call with an
@@ -416,7 +405,8 @@ CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
const FunctionType *fnType) {
- return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
+ /*chainCall=*/false);
}
const CGFunctionInfo &
@@ -426,11 +416,11 @@ CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType(i->Ty));
- return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
- info, required);
+ for (const auto &Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(resultType), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTypes, info, required);
}
/// Arrange a call to a C++ method, passing the given arguments.
@@ -440,13 +430,13 @@ CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ for (const auto &Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
FunctionType::ExtInfo info = FPT->getExtInfo();
- return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
- argTypes, info, required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
+ /*chainCall=*/false, argTypes, info, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
@@ -454,19 +444,20 @@ const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
const FunctionType::ExtInfo &info, bool isVariadic) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
+ for (auto Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));
RequiredArgs required =
(isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
- return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes, info,
- required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(resultType), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTypes, info, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
- return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
- FunctionType::ExtInfo(), RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(
+ getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
+ None, FunctionType::ExtInfo(), RequiredArgs::All);
}
/// Arrange the argument and result information for an abstract value
@@ -474,22 +465,20 @@ const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
- bool IsInstanceMethod,
+ bool instanceMethod,
+ bool chainCall,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs required) {
-#ifndef NDEBUG
- for (ArrayRef<CanQualType>::const_iterator
- I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
- assert(I->isCanonicalAsParam());
-#endif
+ assert(std::all_of(argTypes.begin(), argTypes.end(),
+ std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));
unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
- argTypes);
+ CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
+ resultType, argTypes);
void *insertPos = nullptr;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
@@ -497,11 +486,12 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
return *FI;
// Construct the function info. We co-allocate the ArgInfos.
- FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
- required);
+ FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
+ resultType, argTypes, required);
FunctionInfos.InsertNode(FI, insertPos);
- bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
+ bool inserted = FunctionsBeingProcessed.insert(FI).second;
+ (void)inserted;
assert(inserted && "Recursively being processed?");
// Compute ABI information.
@@ -525,7 +515,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
}
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
- bool IsInstanceMethod,
+ bool instanceMethod,
+ bool chainCall,
const FunctionType::ExtInfo &info,
CanQualType resultType,
ArrayRef<CanQualType> argTypes,
@@ -536,7 +527,8 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->CallingConvention = llvmCC;
FI->EffectiveCallingConvention = llvmCC;
FI->ASTCallingConvention = info.getCC();
- FI->InstanceMethod = IsInstanceMethod;
+ FI->InstanceMethod = instanceMethod;
+ FI->ChainCall = chainCall;
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
FI->Required = required;
@@ -552,13 +544,79 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
/***/
-void CodeGenTypes::GetExpandedTypes(QualType type,
- SmallVectorImpl<llvm::Type*> &expandedTypes) {
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
- uint64_t NumElts = AT->getSize().getZExtValue();
- for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
- GetExpandedTypes(AT->getElementType(), expandedTypes);
- } else if (const RecordType *RT = type->getAs<RecordType>()) {
+namespace {
+// ABIArgInfo::Expand implementation.
+
+// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
+struct TypeExpansion {
+ enum TypeExpansionKind {
+ // Elements of constant arrays are expanded recursively.
+ TEK_ConstantArray,
+ // Record fields are expanded recursively (but if record is a union, only
+ // the field with the largest size is expanded).
+ TEK_Record,
+ // For complex types, real and imaginary parts are expanded recursively.
+ TEK_Complex,
+ // All other types are not expandable.
+ TEK_None
+ };
+
+ const TypeExpansionKind Kind;
+
+ TypeExpansion(TypeExpansionKind K) : Kind(K) {}
+ virtual ~TypeExpansion() {}
+};
+
+struct ConstantArrayExpansion : TypeExpansion {
+ QualType EltTy;
+ uint64_t NumElts;
+
+ ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
+ : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_ConstantArray;
+ }
+};
+
+struct RecordExpansion : TypeExpansion {
+ SmallVector<const CXXBaseSpecifier *, 1> Bases;
+
+ SmallVector<const FieldDecl *, 1> Fields;
+
+ RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
+ SmallVector<const FieldDecl *, 1> &&Fields)
+ : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_Record;
+ }
+};
+
+struct ComplexExpansion : TypeExpansion {
+ QualType EltTy;
+
+ ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_Complex;
+ }
+};
+
+struct NoExpansion : TypeExpansion {
+ NoExpansion() : TypeExpansion(TEK_None) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_None;
+ }
+};
+} // namespace
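
The hierarchy above uses LLVM-style RTTI: a Kind tag plus a classof() predicate, which is what lets dyn_cast/isa work on these nodes without C++ RTTI. A minimal consumer sketch:

  static void describe(const TypeExpansion *TE) {
    if (const auto *CA = dyn_cast<ConstantArrayExpansion>(TE))
      llvm::errs() << "array of " << CA->NumElts << " elements\n";
    else if (isa<ComplexExpansion>(TE))
      llvm::errs() << "complex: real + imaginary parts\n";
  }
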
+
+static std::unique_ptr<TypeExpansion>
+getTypeExpansion(QualType Ty, const ASTContext &Context) {
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ return llvm::make_unique<ConstantArrayExpansion>(
+ AT->getElementType(), AT->getSize().getZExtValue());
+ }
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ SmallVector<const CXXBaseSpecifier *, 1> Bases;
+ SmallVector<const FieldDecl *, 1> Fields;
const RecordDecl *RD = RT->getDecl();
assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
@@ -569,88 +627,178 @@ void CodeGenTypes::GetExpandedTypes(QualType type,
CharUnits UnionSize = CharUnits::Zero();
for (const auto *FD : RD->fields()) {
+ // Skip zero length bitfields.
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ continue;
assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
+ CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
if (UnionSize < FieldSize) {
UnionSize = FieldSize;
LargestFD = FD;
}
}
if (LargestFD)
- GetExpandedTypes(LargestFD->getType(), expandedTypes);
+ Fields.push_back(LargestFD);
} else {
- for (const auto *I : RD->fields()) {
- assert(!I->isBitField() &&
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ assert(!CXXRD->isDynamicClass() &&
+ "cannot expand vtable pointers in dynamic classes");
+ for (const CXXBaseSpecifier &BS : CXXRD->bases())
+ Bases.push_back(&BS);
+ }
+
+ for (const auto *FD : RD->fields()) {
+ // Skip zero length bitfields.
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ continue;
+ assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
- GetExpandedTypes(I->getType(), expandedTypes);
+ Fields.push_back(FD);
}
}
- } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
- llvm::Type *EltTy = ConvertType(CT->getElementType());
- expandedTypes.push_back(EltTy);
- expandedTypes.push_back(EltTy);
- } else
- expandedTypes.push_back(ConvertType(type));
+ return llvm::make_unique<RecordExpansion>(std::move(Bases),
+ std::move(Fields));
+ }
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ return llvm::make_unique<ComplexExpansion>(CT->getElementType());
+ }
+ return llvm::make_unique<NoExpansion>();
}
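
Note that both field loops now tolerate zero-width bit-fields, which only affect layout and carry no value; any other bit-field still trips the assert. A sketch of a record that stays expandable:

  struct Padded {
    int a;
    int : 0;   // zero-width: skipped, produces no IR argument
    int b;
  };           // expands to two ints; a named bit-field would assert
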
-llvm::Function::arg_iterator
-CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
- llvm::Function::arg_iterator AI) {
- assert(LV.isSimple() &&
- "Unexpected non-simple lvalue during struct expansion.");
+static int getExpansionSize(QualType Ty, const ASTContext &Context) {
+ auto Exp = getTypeExpansion(Ty, Context);
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
+ }
+ if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ int Res = 0;
+ for (auto BS : RExp->Bases)
+ Res += getExpansionSize(BS->getType(), Context);
+ for (auto FD : RExp->Fields)
+ Res += getExpansionSize(FD->getType(), Context);
+ return Res;
+ }
+ if (isa<ComplexExpansion>(Exp.get()))
+ return 2;
+ assert(isa<NoExpansion>(Exp.get()));
+ return 1;
+}
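
A worked example of the recursion (a sketch; _Complex is a C feature clang also accepts in C++):

  struct Pt { int xy[2]; _Complex float c; };
  // TEK_Record -> xy : TEK_ConstantArray of 2 ints -> contributes 2
  //            -> c  : TEK_Complex (real, imag)    -> contributes 2
  // getExpansionSize(Pt) == 4; getExpandedTypes yields i32, i32, float, float
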
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- unsigned NumElts = AT->getSize().getZExtValue();
- QualType EltTy = AT->getElementType();
- for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
- llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
- LValue LV = MakeAddrLValue(EltAddr, EltTy);
- AI = ExpandTypeFromArgs(EltTy, LV, AI);
+void
+CodeGenTypes::getExpandedTypes(QualType Ty,
+ SmallVectorImpl<llvm::Type *>::iterator &TI) {
+ auto Exp = getTypeExpansion(Ty, Context);
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ getExpandedTypes(CAExp->EltTy, TI);
}
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
- if (RD->isUnion()) {
- // Unions can be here only in degenerative cases - all the fields are same
- // after flattening. Thus we have to use the "largest" field.
- const FieldDecl *LargestFD = nullptr;
- CharUnits UnionSize = CharUnits::Zero();
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ for (auto BS : RExp->Bases)
+ getExpandedTypes(BS->getType(), TI);
+ for (auto FD : RExp->Fields)
+ getExpandedTypes(FD->getType(), TI);
+ } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
+ llvm::Type *EltTy = ConvertType(CExp->EltTy);
+ *TI++ = EltTy;
+ *TI++ = EltTy;
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ *TI++ = ConvertType(Ty);
+ }
+}
- for (const auto *FD : RD->fields()) {
- assert(!FD->isBitField() &&
- "Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
- if (UnionSize < FieldSize) {
- UnionSize = FieldSize;
- LargestFD = FD;
- }
- }
- if (LargestFD) {
- // FIXME: What are the right qualifiers here?
- LValue SubLV = EmitLValueForField(LV, LargestFD);
- AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
- }
- } else {
- for (const auto *FD : RD->fields()) {
- QualType FT = FD->getType();
+void CodeGenFunction::ExpandTypeFromArgs(
+ QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
- // FIXME: What are the right qualifiers here?
- LValue SubLV = EmitLValueForField(LV, FD);
- AI = ExpandTypeFromArgs(FT, SubLV, AI);
- }
+ auto Exp = getTypeExpansion(Ty, getContext());
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, i);
+ LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
+ ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
+ }
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ llvm::Value *This = LV.getAddress();
+ for (const CXXBaseSpecifier *BS : RExp->Bases) {
+ // Perform a single step derived-to-base conversion.
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
+ /*NullCheckValue=*/false, SourceLocation());
+ LValue SubLV = MakeAddrLValue(Base, BS->getType());
+
+ // Recurse onto bases.
+ ExpandTypeFromArgs(BS->getType(), SubLV, AI);
}
- } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- QualType EltTy = CT->getElementType();
+ for (auto FD : RExp->Fields) {
+ // FIXME: What are the right qualifiers here?
+ LValue SubLV = EmitLValueForField(LV, FD);
+ ExpandTypeFromArgs(FD->getType(), SubLV, AI);
+ }
+ } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
- EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
+ EmitStoreThroughLValue(RValue::get(*AI++),
+ MakeAddrLValue(RealAddr, CExp->EltTy));
llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
- EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
+ EmitStoreThroughLValue(RValue::get(*AI++),
+ MakeAddrLValue(ImagAddr, CExp->EltTy));
} else {
- EmitStoreThroughLValue(RValue::get(AI), LV);
- ++AI;
+ assert(isa<NoExpansion>(Exp.get()));
+ EmitStoreThroughLValue(RValue::get(*AI++), LV);
}
+}
+
+void CodeGenFunction::ExpandTypeToArgs(
+ QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
+ auto Exp = getTypeExpansion(Ty, getContext());
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, i);
+ RValue EltRV =
+ convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
+ ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
+ }
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ llvm::Value *This = RV.getAggregateAddr();
+ for (const CXXBaseSpecifier *BS : RExp->Bases) {
+ // Perform a single step derived-to-base conversion.
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
+ /*NullCheckValue=*/false, SourceLocation());
+ RValue BaseRV = RValue::getAggregate(Base);
+
+ // Recurse onto bases.
+ ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ }
+
+ LValue LV = MakeAddrLValue(This, Ty);
+ for (auto FD : RExp->Fields) {
+ RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
+ ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ }
+ } else if (isa<ComplexExpansion>(Exp.get())) {
+ ComplexPairTy CV = RV.getComplexVal();
+ IRCallArgs[IRCallArgPos++] = CV.first;
+ IRCallArgs[IRCallArgPos++] = CV.second;
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+
+ // Insert a bitcast as needed.
+ llvm::Value *V = RV.getScalarVal();
+ if (IRCallArgPos < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(IRCallArgPos))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
- return AI;
+ IRCallArgs[IRCallArgPos++] = V;
+ }
}
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
@@ -667,11 +815,13 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
llvm::Type *FirstElt = SrcSTy->getElementType(0);
// If the first elt is at least as large as what we're looking for, or if the
- // first element is the same size as the whole struct, we can enter it.
+ // first element is the same size as the whole struct, we can enter it. The
+ // comparison must be made on the store size and not the alloca size. Using
+ // the alloca size may overstate the size of the load.
uint64_t FirstEltSize =
- CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
+ CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
if (FirstEltSize < DstSize &&
- FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
+ FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
return SrcPtr;
// GEP into the first element.
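
The store/alloca distinction matters for types whose ABI slot has tail padding. A hedged sketch using the DataLayout API directly (Ctx and DL assumed in scope; x86 shown):

  llvm::Type *F80 = llvm::Type::getX86_FP80Ty(Ctx);
  uint64_t Store = DL.getTypeStoreSize(F80);  // 10: bytes a load/store actually touches
  uint64_t Alloc = DL.getTypeAllocSize(F80);  // 12 or 16: slot padded to ABI alignment
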
@@ -890,6 +1040,145 @@ static void CreateCoercedStore(llvm::Value *Src,
}
}
+namespace {
+
+/// Encapsulates information about the way function arguments from
+/// CGFunctionInfo should be passed to actual LLVM IR function.
+class ClangToLLVMArgMapping {
+ static const unsigned InvalidIndex = ~0U;
+ unsigned InallocaArgNo;
+ unsigned SRetArgNo;
+ unsigned TotalIRArgs;
+
+ /// Arguments of LLVM IR function corresponding to single Clang argument.
+ struct IRArgs {
+ unsigned PaddingArgIndex;
+ // Argument is expanded to IR arguments at positions
+ // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
+ unsigned FirstArgIndex;
+ unsigned NumberOfArgs;
+
+ IRArgs()
+ : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
+ NumberOfArgs(0) {}
+ };
+
+ SmallVector<IRArgs, 8> ArgInfo;
+
+public:
+ ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs = false)
+ : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
+ ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
+ construct(Context, FI, OnlyRequiredArgs);
+ }
+
+ bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
+ unsigned getInallocaArgNo() const {
+ assert(hasInallocaArg());
+ return InallocaArgNo;
+ }
+
+ bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
+ unsigned getSRetArgNo() const {
+ assert(hasSRetArg());
+ return SRetArgNo;
+ }
+
+ unsigned totalIRArgs() const { return TotalIRArgs; }
+
+ bool hasPaddingArg(unsigned ArgNo) const {
+ assert(ArgNo < ArgInfo.size());
+ return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
+ }
+ unsigned getPaddingArgNo(unsigned ArgNo) const {
+ assert(hasPaddingArg(ArgNo));
+ return ArgInfo[ArgNo].PaddingArgIndex;
+ }
+
+ /// Returns index of first IR argument corresponding to ArgNo, and their
+ /// quantity.
+ std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
+ assert(ArgNo < ArgInfo.size());
+ return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
+ ArgInfo[ArgNo].NumberOfArgs);
+ }
+
+private:
+ void construct(const ASTContext &Context, const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs);
+};
+
+void ClangToLLVMArgMapping::construct(const ASTContext &Context,
+ const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs) {
+ unsigned IRArgNo = 0;
+ bool SwapThisWithSRet = false;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ if (RetAI.getKind() == ABIArgInfo::Indirect) {
+ SwapThisWithSRet = RetAI.isSRetAfterThis();
+ SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
+ }
+
+ unsigned ArgNo = 0;
+ unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
+ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
+ ++I, ++ArgNo) {
+ assert(I != FI.arg_end());
+ QualType ArgType = I->type;
+ const ABIArgInfo &AI = I->info;
+ // Collect data about IR arguments corresponding to Clang argument ArgNo.
+ auto &IRArgs = ArgInfo[ArgNo];
+
+ if (AI.getPaddingType())
+ IRArgs.PaddingArgIndex = IRArgNo++;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // FIXME: handle sseregparm someday...
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
+ if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
+ IRArgs.NumberOfArgs = STy->getNumElements();
+ } else {
+ IRArgs.NumberOfArgs = 1;
+ }
+ break;
+ }
+ case ABIArgInfo::Indirect:
+ IRArgs.NumberOfArgs = 1;
+ break;
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::InAlloca:
+ // ignore and inalloca doesn't have matching LLVM parameters.
+ IRArgs.NumberOfArgs = 0;
+ break;
+ case ABIArgInfo::Expand: {
+ IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
+ break;
+ }
+ }
+
+ if (IRArgs.NumberOfArgs > 0) {
+ IRArgs.FirstArgIndex = IRArgNo;
+ IRArgNo += IRArgs.NumberOfArgs;
+ }
+
+ // Skip over the sret parameter when it comes second. We already handled it
+ // above.
+ if (IRArgNo == 1 && SwapThisWithSRet)
+ IRArgNo++;
+ }
+ assert(ArgNo == ArgInfo.size());
+
+ if (FI.usesInAlloca())
+ InallocaArgNo = IRArgNo++;
+
+ TotalIRArgs = IRArgNo;
+}
+} // namespace
+
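
A worked example of the mapping (a sketch): for a return passed indirectly and a second parameter lowered with ABIArgInfo::Expand:

  // struct Big  { ... };        // returned via sret
  // struct Pair { int x, y; };  // passed with ABIArgInfo::Expand
  // Big f(int a, Pair b);
  //
  // IR:  define void @f(%Big* sret, i32 %a, i32 %b.x, i32 %b.y)
  // Map: getSRetArgNo() == 0
  //      getIRArgs(0)   == {1, 1}  // 'a'
  //      getIRArgs(1)   == {2, 2}  // 'b' occupies IR args [2, 4)
  //      totalIRArgs()  == 4
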
/***/
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
@@ -936,14 +1225,12 @@ llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
-
- bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
+
+ bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
+ (void)Inserted;
assert(Inserted && "Recursively being processed?");
-
- bool SwapThisWithSRet = false;
- SmallVector<llvm::Type*, 8> argTypes;
- llvm::Type *resultType = nullptr;
+ llvm::Type *resultType = nullptr;
const ABIArgInfo &retAI = FI.getReturnInfo();
switch (retAI.getKind()) {
case ABIArgInfo::Expand:
@@ -969,13 +1256,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
case ABIArgInfo::Indirect: {
assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
resultType = llvm::Type::getVoidTy(getLLVMContext());
-
- QualType ret = FI.getReturnType();
- llvm::Type *ty = ConvertType(ret);
- unsigned addressSpace = Context.getTargetAddressSpace(ret);
- argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
-
- SwapThisWithSRet = retAI.isSRetAfterThis();
break;
}
@@ -984,67 +1264,83 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
break;
}
- // Add in all of the required arguments.
- CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
- if (FI.isVariadic()) {
- ie = it + FI.getRequiredArgs().getNumRequiredArgs();
- } else {
- ie = FI.arg_end();
+ ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
+ SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
+
+ // Add type for sret argument.
+ if (IRFunctionArgs.hasSRetArg()) {
+ QualType Ret = FI.getReturnType();
+ llvm::Type *Ty = ConvertType(Ret);
+ unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
+ ArgTypes[IRFunctionArgs.getSRetArgNo()] =
+ llvm::PointerType::get(Ty, AddressSpace);
+ }
+
+ // Add type for inalloca argument.
+ if (IRFunctionArgs.hasInallocaArg()) {
+ auto ArgStruct = FI.getArgStruct();
+ assert(ArgStruct);
+ ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
}
- for (; it != ie; ++it) {
- const ABIArgInfo &argAI = it->info;
+
+ // Add in all of the required arguments.
+ unsigned ArgNo = 0;
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = it + FI.getNumRequiredArgs();
+ for (; it != ie; ++it, ++ArgNo) {
+ const ABIArgInfo &ArgInfo = it->info;
// Insert a padding type to ensure proper alignment.
- if (llvm::Type *PaddingType = argAI.getPaddingType())
- argTypes.push_back(PaddingType);
+ if (IRFunctionArgs.hasPaddingArg(ArgNo))
+ ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ ArgInfo.getPaddingType();
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
- switch (argAI.getKind()) {
+ switch (ArgInfo.getKind()) {
case ABIArgInfo::Ignore:
case ABIArgInfo::InAlloca:
+ assert(NumIRArgs == 0);
break;
case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
// indirect arguments are always on the stack, which is addr space #0.
llvm::Type *LTy = ConvertTypeForMem(it->type);
- argTypes.push_back(LTy->getPointerTo());
+ ArgTypes[FirstIRArg] = LTy->getPointerTo();
break;
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // If the coerce-to type is a first class aggregate, flatten it. Either
- // way is semantically identical, but fast-isel and the optimizer
- // generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
- llvm::Type *argType = argAI.getCoerceToType();
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
+ llvm::Type *argType = ArgInfo.getCoerceToType();
llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
- if (st && !isAAPCSVFP(FI, getTarget())) {
+ if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
+ assert(NumIRArgs == st->getNumElements());
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
- argTypes.push_back(st->getElementType(i));
+ ArgTypes[FirstIRArg + i] = st->getElementType(i);
} else {
- argTypes.push_back(argType);
+ assert(NumIRArgs == 1);
+ ArgTypes[FirstIRArg] = argType;
}
break;
}
case ABIArgInfo::Expand:
- GetExpandedTypes(it->type, argTypes);
+ auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
+ getExpandedTypes(it->type, ArgTypesIter);
+ assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
break;
}
}
- // Add the inalloca struct as the last parameter type.
- if (llvm::StructType *ArgStruct = FI.getArgStruct())
- argTypes.push_back(ArgStruct->getPointerTo());
-
- if (SwapThisWithSRet)
- std::swap(argTypes[0], argTypes[1]);
-
bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
assert(Erased && "Not in set?");
-
- return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
+
+ return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
@@ -1056,7 +1352,8 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
- Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ Info =
+ &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
else
Info = &arrangeCXXMethodDeclaration(MD);
return GetFunctionType(*Info);
@@ -1069,6 +1366,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
+ bool HasOptnone = false;
CallingConv = FI.getEffectiveCallingConvention();
@@ -1109,12 +1407,18 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
RetAttrs.addAttribute(llvm::Attribute::NoAlias);
if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
RetAttrs.addAttribute(llvm::Attribute::NonNull);
+
+ HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
+ }
+
+ // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
+ if (!HasOptnone) {
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
+ if (CodeGenOpts.OptimizeSize == 2)
+ FuncAttrs.addAttribute(llvm::Attribute::MinSize);
}
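
A sketch of the interaction: even under -Os, a function marked optnone now receives neither optsize nor minsize:

  __attribute__((optnone)) void keep_debuggable(void) {
    // compiled without size optimizations; no warning is issued
  }
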
- if (CodeGenOpts.OptimizeSize)
- FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
- if (CodeGenOpts.OptimizeSize == 2)
- FuncAttrs.addAttribute(llvm::Attribute::MinSize);
if (CodeGenOpts.DisableRedZone)
FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
if (CodeGenOpts.NoImplicitFloat)
@@ -1156,9 +1460,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs.addAttribute("no-realign-stack");
}
+ ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
+
QualType RetTy = FI.getReturnType();
- unsigned Index = 1;
- bool SwapThisWithSRet = false;
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
@@ -1174,25 +1478,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::InAlloca: {
- // inalloca disables readnone and readonly
- FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
- .removeAttribute(llvm::Attribute::ReadNone);
- break;
- }
-
+ case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
- llvm::AttrBuilder SRETAttrs;
- SRETAttrs.addAttribute(llvm::Attribute::StructRet);
- if (RetAI.getInReg())
- SRETAttrs.addAttribute(llvm::Attribute::InReg);
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- PAL.push_back(llvm::AttributeSet::get(
- getLLVMContext(), SwapThisWithSRet ? 2 : Index, SRETAttrs));
-
- if (!SwapThisWithSRet)
- ++Index;
- // sret disables readnone and readonly
+ // inalloca and sret disable readnone and readonly
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
break;
@@ -1211,28 +1499,44 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
RetAttrs.addAttribute(llvm::Attribute::NonNull);
}
- if (RetAttrs.hasAttributes())
- PAL.push_back(llvm::
- AttributeSet::get(getLLVMContext(),
- llvm::AttributeSet::ReturnIndex,
- RetAttrs));
+ // Attach return attributes.
+ if (RetAttrs.hasAttributes()) {
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
+ }
- for (const auto &I : FI.arguments()) {
- QualType ParamType = I.type;
- const ABIArgInfo &AI = I.info;
+ // Attach attributes to sret.
+ if (IRFunctionArgs.hasSRetArg()) {
+ llvm::AttrBuilder SRETAttrs;
+ SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+ if (RetAI.getInReg())
+ SRETAttrs.addAttribute(llvm::Attribute::InReg);
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
+ }
+
+ // Attach attributes to inalloca argument.
+ if (IRFunctionArgs.hasInallocaArg()) {
llvm::AttrBuilder Attrs;
+ Attrs.addAttribute(llvm::Attribute::InAlloca);
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
+ }
- // Skip over the sret parameter when it comes second. We already handled it
- // above.
- if (Index == 2 && SwapThisWithSRet)
- ++Index;
+ unsigned ArgNo = 0;
+ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
+ E = FI.arg_end();
+ I != E; ++I, ++ArgNo) {
+ QualType ParamType = I->type;
+ const ABIArgInfo &AI = I->info;
+ llvm::AttrBuilder Attrs;
- if (AI.getPaddingType()) {
+ // Add attribute for padding argument, if necessary.
+ if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
if (AI.getPaddingInReg())
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
- llvm::Attribute::InReg));
- // Increment Index if there is padding.
- ++Index;
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
+ llvm::Attribute::InReg));
}
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
@@ -1245,24 +1549,13 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
else if (ParamType->isUnsignedIntegerOrEnumerationType())
Attrs.addAttribute(llvm::Attribute::ZExt);
// FALL THROUGH
- case ABIArgInfo::Direct: {
- if (AI.getInReg())
+ case ABIArgInfo::Direct:
+ if (ArgNo == 0 && FI.isChainCall())
+ Attrs.addAttribute(llvm::Attribute::Nest);
+ else if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
-
- // FIXME: handle sseregparm someday...
-
- llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType());
- if (!isAAPCSVFP(FI, getTarget()) && STy) {
- unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
- if (Attrs.hasAttributes())
- for (unsigned I = 0; I < Extra; ++I)
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
- Attrs));
- Index += Extra;
- }
break;
- }
+
case ABIArgInfo::Indirect:
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
@@ -1278,26 +1571,15 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
break;
case ABIArgInfo::Ignore:
- // Skip increment, no matching LLVM parameter.
+ case ABIArgInfo::Expand:
continue;
case ABIArgInfo::InAlloca:
// inalloca disables readnone and readonly.
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
- // Skip increment, no matching LLVM parameter.
- continue;
-
- case ABIArgInfo::Expand: {
- SmallVector<llvm::Type*, 8> types;
- // FIXME: This is rather inefficient. Do we ever actually need to do
- // anything here? The result should be just reconstructed on the other
- // side, so extension should be a non-issue.
- getTypes().GetExpandedTypes(ParamType, types);
- Index += types.size();
continue;
}
- }
if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
@@ -1308,17 +1590,15 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Attrs.addAttribute(llvm::Attribute::NonNull);
}
- if (Attrs.hasAttributes())
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
- ++Index;
- }
-
- // Add the inalloca attribute to the trailing inalloca parameter if present.
- if (FI.usesInAlloca()) {
- llvm::AttrBuilder Attrs;
- Attrs.addAttribute(llvm::Attribute::InAlloca);
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
+ if (Attrs.hasAttributes()) {
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
+ for (unsigned i = 0; i < NumIRArgs; i++)
+ PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
+ FirstIRArg + i + 1, Attrs));
+ }
}
+ assert(ArgNo == FI.arg_size());
if (FuncAttrs.hasAttributes())
PAL.push_back(llvm::
@@ -1347,9 +1627,41 @@ static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
+/// Returns the attribute (either parameter attribute, or function
+/// attribute), which declares argument ArgNo to be non-null.
+static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
+ QualType ArgType, unsigned ArgNo) {
+ // FIXME: __attribute__((nonnull)) can also be applied to:
+ // - references to pointers, where the pointee is known to be
+ // nonnull (apparently a Clang extension)
+ // - transparent unions containing pointers
+ // In the former case, LLVM IR cannot represent the constraint. In
+ // the latter case, we have no guarantee that the transparent union
+ // is in fact passed as a pointer.
+ if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
+ return nullptr;
+ // First, check attribute on parameter itself.
+ if (PVD) {
+ if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
+ return ParmNNAttr;
+ }
+ // Check function attributes.
+ if (!FD)
+ return nullptr;
+ for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
+ if (NNAttr->isNonNull(ArgNo))
+ return NNAttr;
+ }
+ return nullptr;
+}
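
The two source-level spellings this helper unifies (a sketch; the index form is 1-based):

  void f(int *p __attribute__((nonnull)));     // attribute on the parameter itself
  __attribute__((nonnull(1))) void g(int *p);  // attribute on the function, by index
  // either form lets EmitFunctionProlog add the IR-level 'nonnull' attribute
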
+
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function *Fn,
const FunctionArgList &Args) {
+ if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
+ // Naked functions don't have prologues.
+ return;
+
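
A sketch of why: a naked function's body is expected to be written entirely in inline assembly, so the usual prologue would corrupt it:

  __attribute__((naked)) void trampoline(void) {
    __asm__ volatile("ret");   // the body supplies all of the code
  }
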
// If this is an implicit-return-zero function, go ahead and
// initialize the return value. TODO: it might be nice to have
// a more general mechanism for this that didn't require synthesized
@@ -1366,39 +1678,31 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// FIXME: We no longer need the types from FunctionArgList; lift up and
// simplify.
- // Emit allocs for param decls. Give the LLVM Argument nodes names.
- llvm::Function::arg_iterator AI = Fn->arg_begin();
+ ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
+ // Flattened function arguments.
+ SmallVector<llvm::Argument *, 16> FnArgs;
+ FnArgs.reserve(IRFunctionArgs.totalIRArgs());
+ for (auto &Arg : Fn->args()) {
+ FnArgs.push_back(&Arg);
+ }
+ assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
llvm::Value *ArgStruct = nullptr;
- if (FI.usesInAlloca()) {
- llvm::Function::arg_iterator EI = Fn->arg_end();
- --EI;
- ArgStruct = EI;
+ if (IRFunctionArgs.hasInallocaArg()) {
+ ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
}
- // Name the struct return parameter, which can come first or second.
- const ABIArgInfo &RetAI = FI.getReturnInfo();
- bool SwapThisWithSRet = false;
- if (RetAI.isIndirect()) {
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- if (SwapThisWithSRet)
- ++AI;
+ // Name the struct return parameter.
+ if (IRFunctionArgs.hasSRetArg()) {
+ auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
AI->setName("agg.result");
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
llvm::Attribute::NoAlias));
- if (SwapThisWithSRet)
- --AI; // Go back to the beginning for 'this'.
- else
- ++AI; // Skip the sret parameter.
}
- // Get the function-level nonnull attribute if it exists.
- const NonNullAttr *NNAtt =
- CurCodeDecl ? CurCodeDecl->getAttr<NonNullAttr>() : nullptr;
-
// Track if we received the parameter as a pointer (indirect, byval, or
// inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it
// into a local alloca for us.
@@ -1413,9 +1717,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// we can push the cleanups in the correct order for the ABI.
assert(FI.arg_size() == Args.size() &&
"Mismatch between function signature & arguments.");
- unsigned ArgNo = 1;
+ unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i, ++info_it, ++ArgNo) {
const VarDecl *Arg = *i;
QualType Ty = info_it->type;
@@ -1424,20 +1728,21 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
bool isPromoted =
isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
- // Skip the dummy padding argument.
- if (ArgI.getPaddingType())
- ++AI;
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgI.getKind()) {
case ABIArgInfo::InAlloca: {
+ assert(NumIRArgs == 0);
llvm::Value *V = Builder.CreateStructGEP(
ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
- continue; // Don't increment AI!
+ break;
}
case ABIArgInfo::Indirect: {
- llvm::Value *V = AI;
+ assert(NumIRArgs == 1);
+ llvm::Value *V = FnArgs[FirstIRArg];
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -1483,12 +1788,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
ArgI.getCoerceToType() == ConvertType(Ty) &&
ArgI.getDirectOffset() == 0) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ assert(NumIRArgs == 1);
+ auto AI = FnArgs[FirstIRArg];
llvm::Value *V = AI;
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
- if ((NNAtt && NNAtt->isNonNull(PVD->getFunctionScopeIndex())) ||
- PVD->hasAttr<NonNullAttr>())
+ if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
+ PVD->getFunctionScopeIndex()))
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
@@ -1527,6 +1833,25 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
}
+
+ const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
+ if (!AVAttr)
+ if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
+ AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
+ if (AVAttr) {
+ llvm::Value *AlignmentValue =
+ EmitScalarExpr(AVAttr->getAlignment());
+ llvm::ConstantInt *AlignmentCI =
+ cast<llvm::ConstantInt>(AlignmentValue);
+ unsigned Alignment =
+ std::min((unsigned) AlignmentCI->getZExtValue(),
+ +llvm::Value::MaximumAlignment);
+
+ llvm::AttrBuilder Attrs;
+ Attrs.addAlignmentAttr(Alignment);
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1, Attrs));
+ }
}
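
The block above honors Clang's align_value attribute, found either on the parameter's declaration or inherited through a typedef, and clamps it to llvm::Value::MaximumAlignment. A sketch of the source form:

  typedef double *aligned_dp __attribute__((align_value(32)));
  void sum(aligned_dp p);   // parameter p gets an IR 'align 32' attribute
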
if (Arg->getType().isRestrictQualified())
@@ -1581,13 +1906,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
}
- // If the coerce-to type is a first class aggregate, we flatten it and
- // pass the elements. Either way is semantically identical, but fast-isel
- // and the optimizer generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
- if (!isAAPCSVFP(FI, getTarget()) && STy && STy->getNumElements() > 1) {
+ if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
+ STy->getNumElements() > 1) {
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
@@ -1596,11 +1919,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (SrcSize <= DstSize) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+ assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateStore(AI, EltPtr);
}
} else {
llvm::AllocaInst *TempAlloca =
@@ -1608,20 +1932,22 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
TempAlloca->setAlignment(AlignmentToUse);
llvm::Value *TempV = TempAlloca;
+ assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateStore(AI, EltPtr);
}
Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ assert(NumIRArgs == 1);
+ auto AI = FnArgs[FirstIRArg];
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
+ CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
}
@@ -1634,7 +1960,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
} else {
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
}
- continue; // Skip ++AI increment, already done.
+ break;
}
case ABIArgInfo::Expand: {
@@ -1645,17 +1971,20 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
CharUnits Align = getContext().getDeclAlign(Arg);
Alloca->setAlignment(Align.getQuantity());
LValue LV = MakeAddrLValue(Alloca, Ty, Align);
- llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
- // Name the arguments used in expansion and increment AI.
- unsigned Index = 0;
- for (; AI != End; ++AI, ++Index)
- AI->setName(Arg->getName() + "." + Twine(Index));
- continue;
+ auto FnArgIter = FnArgs.begin() + FirstIRArg;
+ ExpandTypeFromArgs(Ty, LV, FnArgIter);
+ assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
+ for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
+ auto AI = FnArgs[FirstIRArg + i];
+ AI->setName(Arg->getName() + "." + Twine(i));
+ }
+ break;
}
case ABIArgInfo::Ignore:
+ assert(NumIRArgs == 0);
// Initialize the local variable appropriately.
if (!hasScalarEvaluationKind(Ty)) {
ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
@@ -1663,21 +1992,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
}
-
- // Skip increment, no matching LLVM parameter.
- continue;
+ break;
}
-
- ++AI;
-
- if (ArgNo == 1 && SwapThisWithSRet)
- ++AI; // Skip the sret parameter.
}
- if (FI.usesInAlloca())
- ++AI;
- assert(AI == Fn->arg_end() && "Argument mismatch!");
-
if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
for (int I = Args.size() - 1; I >= 0; --I)
EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
@@ -1887,6 +2205,12 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
bool EmitRetDbgLoc,
SourceLocation EndLoc) {
+ if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
+ // Naked functions don't have epilogues.
+ Builder.CreateUnreachable();
+ return;
+ }
+
// Functions with no result always return void.
if (!ReturnValue) {
Builder.CreateRetVoid();
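
Naked functions get neither prolog nor epilog, so emitting a normal ret
here would be wrong; the block is terminated with unreachable instead and
the inline asm body is responsible for returning. A minimal sketch (body
and target asm are illustrative):

    __attribute__((naked)) void park(void) {
      __asm__ volatile("1: b 1b");  // the asm supplies any return sequence
    }
    // Epilog effect (sketch): the IR ends in "unreachable", not "ret void".
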
@@ -1998,7 +2322,26 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm_unreachable("Invalid ABI kind for return argument");
}
- llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
+ llvm::Instruction *Ret;
+ if (RV) {
+ if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
+ if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
+ SanitizerScope SanScope(this);
+ llvm::Value *Cond = Builder.CreateICmpNE(
+ RV, llvm::Constant::getNullValue(RV->getType()));
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(EndLoc),
+ EmitCheckSourceLocation(RetNNAttr->getLocation()),
+ };
+ EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
+ "nonnull_return", StaticData, None);
+ }
+ }
+ Ret = Builder.CreateRet(RV);
+ } else {
+ Ret = Builder.CreateRetVoid();
+ }
+
if (!RetDbgLoc.isUnknown())
Ret->setDebugLoc(RetDbgLoc);
}
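
The added block implements the returns-nonnull-attribute sanitizer: the
scalar return value is compared against null and failures funnel into the
usual EmitCheck machinery. A minimal sketch, assuming the documented
-fsanitize spelling (function name illustrative):

    __attribute__((returns_nonnull)) char *dup_name(const char *s);
    // With -fsanitize=returns-nonnull-attribute the epilog gains (sketch):
    //   %nonnull = icmp ne i8* %ret, null
    //   br i1 %nonnull, label %cont, label %handler  ; handler calls ubsan
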
@@ -2045,19 +2388,8 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
return args.add(RValue::get(Builder.CreateLoad(local)), type);
}
- if (isInAllocaArgument(CGM.getCXXABI(), type)) {
- AggValueSlot Slot = createPlaceholderSlot(*this, type);
- Slot.setExternallyDestructed();
-
- // FIXME: Either emit a copy constructor call, or figure out how to do
- // guaranteed tail calls with perfect forwarding in LLVM.
- CGM.ErrorUnsupported(param, "non-trivial argument copy for thunk");
- EmitNullInitialization(Slot.getAddr(), type);
-
- RValue RV = Slot.asRValue();
- args.add(RV, type);
- return;
- }
+ assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
+ "cannot emit delegate call arguments for inalloca arguments!");
args.add(convertTempToRValue(local, type, loc), type);
}
@@ -2317,10 +2649,36 @@ void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
}
}
+static void emitNonNullArgCheck(CodeGenFunction &CGF, RValue RV,
+ QualType ArgType, SourceLocation ArgLoc,
+ const FunctionDecl *FD, unsigned ParmNum) {
+ if (!CGF.SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
+ return;
+ auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
+ unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
+ auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
+ if (!NNAttr)
+ return;
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ assert(RV.isScalar());
+ llvm::Value *V = RV.getScalarVal();
+ llvm::Value *Cond =
+ CGF.Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
+ llvm::Constant *StaticData[] = {
+ CGF.EmitCheckSourceLocation(ArgLoc),
+ CGF.EmitCheckSourceLocation(NNAttr->getLocation()),
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgNo + 1),
+ };
+ CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
+ "nonnull_arg", StaticData, None);
+}
+
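
emitNonNullArgCheck is the caller-side twin of the prolog attribute above:
with -fsanitize=nonnull-attribute enabled, an argument bound to a nonnull
parameter is tested before the call. A minimal usage sketch (names
illustrative):

    void consume(void *p) __attribute__((nonnull));
    consume(q);
    // Call-site effect (sketch): q is icmp'd against null first; on failure
    // the "nonnull_arg" check reports the 1-based index (ArgNo + 1).
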
void CodeGenFunction::EmitCallArgs(CallArgList &Args,
ArrayRef<QualType> ArgTypes,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
+ const FunctionDecl *CalleeDecl,
+ unsigned ParamsToSkip,
bool ForceColumnInfo) {
CGDebugInfo *DI = getDebugInfo();
SourceLocation CallLoc;
@@ -2344,6 +2702,8 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args,
for (int I = ArgTypes.size() - 1; I >= 0; --I) {
CallExpr::const_arg_iterator Arg = ArgBeg + I;
EmitCallArg(Args, *Arg, ArgTypes[I]);
+ emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
+ CalleeDecl, ParamsToSkip + I);
// Restore the debug location.
if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
}
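
CalleeDecl and ParamsToSkip are threaded through so the nonnull check can
recover the right ParmVarDecl even when only a suffix of the argument list
is emitted here. A hypothetical call shape (offsets illustrative, not from
this patch):

    // One argument was already materialized elsewhere; keep attribute
    // indices aligned with the declaration's prototype:
    EmitCallArgs(Args, ArgTypes, CE->arg_begin() + 1, CE->arg_end(),
                 FD, /*ParamsToSkip=*/1);
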
@@ -2358,6 +2718,8 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args,
CallExpr::const_arg_iterator Arg = ArgBeg + I;
assert(Arg != ArgEnd);
EmitCallArg(Args, *Arg, ArgTypes[I]);
+ emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
+ CalleeDecl, ParamsToSkip + I);
// Restore the debug location.
if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
}
@@ -2457,6 +2819,24 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
args.add(EmitAnyExprToTemp(E), type);
}
+QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
+ // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
+ // implicitly widens null pointer constants that are arguments to varargs
+ // functions to pointer-sized ints.
+ if (!getTarget().getTriple().isOSWindows())
+ return Arg->getType();
+
+ if (Arg->getType()->isIntegerType() &&
+ getContext().getTypeSize(Arg->getType()) <
+ getContext().getTargetInfo().getPointerWidth(0) &&
+ Arg->isNullPointerConstant(getContext(),
+ Expr::NPC_ValueDependentIsNotNull)) {
+ return getContext().getIntPtrType();
+ }
+
+ return Arg->getType();
+}
+
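
getVarArgType papers over a Windows quirk: headers there define NULL as a
plain int 0, so a null constant narrower than a pointer that is passed
through '...' must be widened to pointer width to match MSVC. A minimal
sketch of the case it targets:

    printf("%p\n", NULL);
    // On Win64 (sketch): the int 0 is a null pointer constant, so the
    // argument is emitted at getContext().getIntPtrType() width, not i32.
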
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
@@ -2471,7 +2851,7 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
const llvm::Twine &name) {
- return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitNounwindRuntimeCall(callee, None, name);
}
/// Emits a call to the given nounwind runtime function.
@@ -2489,7 +2869,7 @@ CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
const llvm::Twine &name) {
- return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitRuntimeCall(callee, None, name);
}
/// Emits a simple call (never an invoke) to the given runtime
@@ -2528,7 +2908,7 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
const Twine &name) {
- return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitRuntimeCallOrInvoke(callee, None, name);
}
/// Emits a call or invoke instruction to the given runtime function.
@@ -2544,7 +2924,7 @@ CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
const Twine &Name) {
- return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
+ return EmitCallOrInvoke(Callee, None, Name);
}
/// Emits a call or invoke instruction to the given function, depending
@@ -2572,73 +2952,6 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
return Inst;
}
-static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
- llvm::FunctionType *FTy) {
- if (ArgNo < FTy->getNumParams())
- assert(Elt->getType() == FTy->getParamType(ArgNo));
- else
- assert(FTy->isVarArg());
- ++ArgNo;
-}
-
-void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
- SmallVectorImpl<llvm::Value *> &Args,
- llvm::FunctionType *IRFuncTy) {
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- unsigned NumElts = AT->getSize().getZExtValue();
- QualType EltTy = AT->getElementType();
- llvm::Value *Addr = RV.getAggregateAddr();
- for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
- llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
- RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
- ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
- }
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
- assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
- LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
-
- if (RD->isUnion()) {
- const FieldDecl *LargestFD = nullptr;
- CharUnits UnionSize = CharUnits::Zero();
-
- for (const auto *FD : RD->fields()) {
- assert(!FD->isBitField() &&
- "Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
- if (UnionSize < FieldSize) {
- UnionSize = FieldSize;
- LargestFD = FD;
- }
- }
- if (LargestFD) {
- RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
- ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
- }
- } else {
- for (const auto *FD : RD->fields()) {
- RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
- ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
- }
- }
- } else if (Ty->isAnyComplexType()) {
- ComplexPairTy CV = RV.getComplexVal();
- Args.push_back(CV.first);
- Args.push_back(CV.second);
- } else {
- assert(RV.isScalar() &&
- "Unexpected non-scalar rvalue during struct expansion.");
-
- // Insert a bitcast as needed.
- llvm::Value *V = RV.getScalarVal();
- if (Args.size() < IRFuncTy->getNumParams() &&
- V->getType() != IRFuncTy->getParamType(Args.size()))
- V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
-
- Args.push_back(V);
- }
-}
-
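
The recursive ExpandTypeToArgs removed above is relocated rather than
deleted: its replacement fills preassigned IRCallArgs slots through a
cursor instead of push_back, which the IRArgPos bookkeeping at the Expand
call site later in this diff relies on. A sketch of the replacement's
shape (the exact declaration is in an unquoted part of this patch):

    void ExpandTypeToArgs(QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
                          SmallVectorImpl<llvm::Value *> &IRCallArgs,
                          unsigned &IRCallArgPos);
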
/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
@@ -2661,15 +2974,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const Decl *TargetDecl,
llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
- SmallVector<llvm::Value*, 16> Args;
// Handle struct-return functions by passing a pointer to the
// location that we would like to return into.
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
- // IRArgNo - Keep track of the argument number in the callee we're looking at.
- unsigned IRArgNo = 0;
llvm::FunctionType *IRFuncTy =
cast<llvm::FunctionType>(
cast<llvm::PointerType>(Callee->getType())->getElementType());
@@ -2691,22 +3001,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
ArgMemory = AI;
}
+ ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
+ SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
+
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
llvm::Value *SRetPtr = nullptr;
- bool SwapThisWithSRet = false;
if (RetAI.isIndirect() || RetAI.isInAlloca()) {
SRetPtr = ReturnValue.getValue();
if (!SRetPtr)
SRetPtr = CreateMemTemp(RetTy);
- if (RetAI.isIndirect()) {
- Args.push_back(SRetPtr);
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- if (SwapThisWithSRet)
- IRArgNo = 1;
- checkArgMatches(SRetPtr, IRArgNo, IRFuncTy);
- if (SwapThisWithSRet)
- IRArgNo = 0;
+ if (IRFunctionArgs.hasSRetArg()) {
+ IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
} else {
llvm::Value *Addr =
Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
@@ -2716,26 +3022,26 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
+ unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
- I != E; ++I, ++info_it) {
+ I != E; ++I, ++info_it, ++ArgNo) {
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
- // Skip 'sret' if it came second.
- if (IRArgNo == 1 && SwapThisWithSRet)
- ++IRArgNo;
-
CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
// Insert a padding argument to ensure proper alignment.
- if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
- Args.push_back(llvm::UndefValue::get(PaddingType));
- ++IRArgNo;
- }
+ if (IRFunctionArgs.hasPaddingArg(ArgNo))
+ IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ llvm::UndefValue::get(ArgInfo.getPaddingType());
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgInfo.getKind()) {
case ABIArgInfo::InAlloca: {
+ assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (RV.isAggregate()) {
// Replace the placeholder with the appropriate argument slot GEP.
@@ -2761,22 +3067,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
}
- break; // Don't increment IRArgNo!
+ break;
}
case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
if (ArgInfo.getIndirectAlign() > AI->getAlignment())
AI->setAlignment(ArgInfo.getIndirectAlign());
- Args.push_back(AI);
+ IRCallArgs[FirstIRArg] = AI;
- LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
+ LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
-
- // Validate argument match.
- checkArgMatches(AI, IRArgNo, IRFuncTy);
} else {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
@@ -2790,8 +3094,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
- const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
- IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
+ const unsigned ArgAddrSpace =
+ (FirstIRArg < IRFuncTy->getNumParams()
+ ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
+ : 0);
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
(ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
@@ -2800,23 +3106,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
if (Align > AI->getAlignment())
AI->setAlignment(Align);
- Args.push_back(AI);
+ IRCallArgs[FirstIRArg] = AI;
EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
-
- // Validate argument match.
- checkArgMatches(AI, IRArgNo, IRFuncTy);
} else {
// Skip the extra memcpy call.
- Args.push_back(Addr);
-
- // Validate argument match.
- checkArgMatches(Addr, IRArgNo, IRFuncTy);
+ IRCallArgs[FirstIRArg] = Addr;
}
}
break;
}
case ABIArgInfo::Ignore:
+ assert(NumIRArgs == 0);
break;
case ABIArgInfo::Extend:
@@ -2824,20 +3125,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
+ assert(NumIRArgs == 1);
llvm::Value *V;
if (RV.isScalar())
V = RV.getScalarVal();
else
V = Builder.CreateLoad(RV.getAggregateAddr());
-
+
+ // We might have to widen integers, but we should never truncate.
+ if (ArgInfo.getCoerceToType() != V->getType() &&
+ V->getType()->isIntegerTy())
+ V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
+
// If the argument doesn't match, perform a bitcast to coerce it. This
// can happen due to trivial type mismatches.
- if (IRArgNo < IRFuncTy->getNumParams() &&
- V->getType() != IRFuncTy->getParamType(IRArgNo))
- V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
- Args.push_back(V);
-
- checkArgMatches(V, IRArgNo, IRFuncTy);
+ if (FirstIRArg < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(FirstIRArg))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
+ IRCallArgs[FirstIRArg] = V;
break;
}
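
The new zext covers direct arguments whose coerce-to type is a wider
integer than the frontend value (for example a bool passed as i8);
truncation would drop bits, hence only widening is performed. An
equivalent builder call for that case (sketch, types illustrative):

    llvm::Value *Widened = Builder.CreateZExt(V, Builder.getInt8Ty());
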
@@ -2859,14 +3164,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
- // If the coerce-to type is a first class aggregate, we flatten it and
- // pass the elements. Either way is semantically identical, but fast-isel
- // and the optimizer generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
- if (STy && !isAAPCSVFP(CallInfo, getTarget())) {
+ if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -2886,38 +3188,32 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::PointerType::getUnqual(STy));
}
+ assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
// We don't know what we're loading from.
LI->setAlignment(1);
- Args.push_back(LI);
-
- // Validate argument match.
- checkArgMatches(LI, IRArgNo, IRFuncTy);
+ IRCallArgs[FirstIRArg + i] = LI;
}
} else {
// In the simple case, just pass the coerced loaded value.
- Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
- *this));
-
- // Validate argument match.
- checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
+ assert(NumIRArgs == 1);
+ IRCallArgs[FirstIRArg] =
+ CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
}
break;
}
case ABIArgInfo::Expand:
- ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
- IRArgNo = Args.size();
+ unsigned IRArgPos = FirstIRArg;
+ ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
+ assert(IRArgPos == FirstIRArg + NumIRArgs);
break;
}
}
- if (SwapThisWithSRet)
- std::swap(Args[0], Args[1]);
-
if (ArgMemory) {
llvm::Value *Arg = ArgMemory;
if (CallInfo.isVariadic()) {
@@ -2948,7 +3244,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Arg = Builder.CreateBitCast(Arg, LastParamTy);
}
}
- Args.push_back(Arg);
+ assert(IRFunctionArgs.hasInallocaArg());
+ IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
}
if (!CallArgs.getCleanupsToDeactivate().empty())
@@ -2967,7 +3264,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
ActualFT->getNumParams() == CurFT->getNumParams() &&
- ActualFT->getNumParams() == Args.size() &&
+ ActualFT->getNumParams() == IRCallArgs.size() &&
(CurFT->isVarArg() || !ActualFT->isVarArg())) {
bool ArgsMatch = true;
for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
@@ -2984,6 +3281,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
+ assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
+ for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
+    // An inalloca argument can have a different type from its IR parameter.
+ if (IRFunctionArgs.hasInallocaArg() &&
+ i == IRFunctionArgs.getInallocaArgNo())
+ continue;
+ if (i < IRFuncTy->getNumParams())
+ assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
+ }
+
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
@@ -2998,10 +3305,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::CallSite CS;
if (!InvokeDest) {
- CS = Builder.CreateCall(Callee, Args);
+ CS = Builder.CreateCall(Callee, IRCallArgs);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
EmitBlock(Cont);
}
if (callOrInvoke)
@@ -3050,75 +3357,92 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// lexical order, so deactivate it and run it manually here.
CallArgs.freeArgumentMemory(*this);
- switch (RetAI.getKind()) {
- case ABIArgInfo::InAlloca:
- case ABIArgInfo::Indirect:
- return convertTempToRValue(SRetPtr, RetTy, SourceLocation());
+ RValue Ret = [&] {
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::InAlloca:
+ case ABIArgInfo::Indirect:
+ return convertTempToRValue(SRetPtr, RetTy, SourceLocation());
- case ABIArgInfo::Ignore:
- // If we are ignoring an argument that had a result, make sure to
- // construct the appropriate return value for our caller.
- return GetUndefRValue(RetTy);
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct: {
- llvm::Type *RetIRTy = ConvertType(RetTy);
- if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
- switch (getEvaluationKind(RetTy)) {
- case TEK_Complex: {
- llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
- llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
- return RValue::getComplex(std::make_pair(Real, Imag));
- }
- case TEK_Aggregate: {
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
+ switch (getEvaluationKind(RetTy)) {
+ case TEK_Complex: {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ case TEK_Aggregate: {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "agg.tmp");
- DestIsVolatile = false;
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
+ return RValue::getAggregate(DestPtr);
}
- BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
- return RValue::getAggregate(DestPtr);
- }
- case TEK_Scalar: {
- // If the argument doesn't match, perform a bitcast to coerce it. This
- // can happen due to trivial type mismatches.
- llvm::Value *V = CI;
- if (V->getType() != RetIRTy)
- V = Builder.CreateBitCast(V, RetIRTy);
- return RValue::get(V);
+ case TEK_Scalar: {
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
}
+
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
}
- llvm_unreachable("bad evaluation kind");
- }
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "coerce");
- DestIsVolatile = false;
+ return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
- // If the value is offset in memory, apply the offset now.
- llvm::Value *StorePtr = DestPtr;
- if (unsigned Offs = RetAI.getDirectOffset()) {
- StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
- StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
- StorePtr = Builder.CreateBitCast(StorePtr,
- llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
}
- CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
- return convertTempToRValue(DestPtr, RetTy, SourceLocation());
- }
+ llvm_unreachable("Unhandled ABIArgInfo::Kind");
+ } ();
- case ABIArgInfo::Expand:
- llvm_unreachable("Invalid ABI kind for return argument");
+ if (Ret.isScalar() && TargetDecl) {
+ if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
+ llvm::Value *OffsetValue = nullptr;
+ if (const auto *Offset = AA->getOffset())
+ OffsetValue = EmitScalarExpr(Offset);
+
+ llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
+ llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
+ EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
+ OffsetValue);
+ }
}
- llvm_unreachable("Unhandled ABIArgInfo::Kind");
+ return Ret;
}
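
Most of the churn above is re-indentation: the return-value switch is
wrapped in an immediately-invoked lambda so that every exit path flows
through one place, where the callee's assume_aligned attribute can be
applied via EmitAlignmentAssumption (which lowers to llvm.assume). A
minimal sketch with an illustrative allocator name:

    __attribute__((assume_aligned(64))) void *alloc_block(unsigned n);
    // Call-site effect (sketch): the returned pointer is assumed to be
    // 64-byte aligned, roughly: call void @llvm.assume(i1 %align.cond)
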
/* VarArg handling */