author    dim <dim@FreeBSD.org>  2010-09-17 15:54:40 +0000
committer dim <dim@FreeBSD.org>  2010-09-17 15:54:40 +0000
commit    36c49e3f258dced101949edabd72e9bc3f1dedc4 (patch)
tree      0bbe07708f7571f8b5291f6d7b96c102b7c99dee  /lib/CodeGen/CGCall.cpp
parent    fc84956ac8b7cd244ef30e7a4d4d38a58dec5904 (diff)
Vendor import of clang r114020 (from the release_28 branch):
http://llvm.org/svn/llvm-project/cfe/branches/release_28@114020

Approved by: rpaulo (mentor)
Diffstat (limited to 'lib/CodeGen/CGCall.cpp')
-rw-r--r--  lib/CodeGen/CGCall.cpp | 481
1 file changed, 260 insertions(+), 221 deletions(-)
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 3d1e143..475dfa5 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "CGCall.h"
+#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
@@ -35,6 +36,7 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ // TODO: add support for CC_X86Pascal to llvm
}
}
@@ -99,6 +101,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
if (D->hasAttr<ThisCallAttr>())
return CC_X86ThisCall;
+ if (D->hasAttr<PascalAttr>())
+ return CC_X86Pascal;
+
return CC_C;
}
@@ -116,6 +121,9 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
llvm::SmallVector<CanQualType, 16> ArgTys;
+ assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
+ assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
+
// Add the 'this' pointer unless this is a static method.
if (MD->isInstance())
ArgTys.push_back(GetThisType(Context, MD->getParent()));
@@ -126,29 +134,32 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
CXXCtorType Type) {
llvm::SmallVector<CanQualType, 16> ArgTys;
-
- // Add the 'this' pointer.
ArgTys.push_back(GetThisType(Context, D->getParent()));
+ CanQualType ResTy = Context.VoidTy;
- // Check if we need to add a VTT parameter (which has type void **).
- if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
- ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+ TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
- return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+
+ // Add the formal parameters.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+
+ return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
CXXDtorType Type) {
- llvm::SmallVector<CanQualType, 16> ArgTys;
-
- // Add the 'this' pointer.
+ llvm::SmallVector<CanQualType, 2> ArgTys;
ArgTys.push_back(GetThisType(Context, D->getParent()));
-
- // Check if we need to add a VTT parameter (which has type void **).
- if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
- ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+ CanQualType ResTy = Context.VoidTy;
+
+ TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
- return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
+
+ return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
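The two constructor/destructor hunks above stop hard-coding the VTT check and instead ask the C++ ABI object to build the implicit signature. As a minimal standalone sketch of the Itanium-style rule being moved behind CGCXXABI::BuildConstructorSignature (simplified, hypothetical types, not the real Clang API):

    #include <string>
    #include <vector>

    enum CXXCtorType { Ctor_Complete, Ctor_Base };

    struct RecordInfo {            // stand-in for CXXRecordDecl
      unsigned NumVBases;          // number of virtual bases
    };

    // Appends the implicit ABI parameters that follow 'this'. A base-object
    // constructor of a class with virtual bases receives a VTT (a table of
    // vtable pointers), passed with type void**.
    void BuildConstructorSignature(const RecordInfo &RD, CXXCtorType Type,
                                   std::vector<std::string> &ArgTys) {
      if (Type == Ctor_Base && RD.NumVBases != 0)
        ArgTys.push_back("void**");  // the VTT parameter
    }

    int main() {
      std::vector<std::string> ArgTys(1, "this");
      RecordInfo RD = {1};                     // one virtual base
      BuildConstructorSignature(RD, Ctor_Base, ArgTys);
      return ArgTys.size() == 2 ? 0 : 1;       // 'this' plus the VTT
    }

Routing this through the ABI object lets non-Itanium ABIs add different implicit parameters without touching the call-lowering code.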
@@ -243,34 +254,26 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
ArgTys.data(), ArgTys.size());
FunctionInfos.InsertNode(FI, InsertPos);
- // ABI lowering wants to know what our preferred type for the argument is in
- // various situations, pass it in.
- llvm::SmallVector<const llvm::Type *, 8> PreferredArgTypes;
- for (llvm::SmallVectorImpl<CanQualType>::const_iterator
- I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) {
- // If this is being called from the guts of the ConvertType loop, make sure
- // to call ConvertTypeRecursive so we don't get into issues with cyclic
- // pointer type structures.
- PreferredArgTypes.push_back(ConvertTypeRecursive(*I));
- }
-
// Compute ABI information.
- getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext(),
- PreferredArgTypes.data(), PreferredArgTypes.size());
+ getABIInfo().computeInfo(*FI);
+
+ // Loop over all of the computed argument and return value info. If any of
+ // them are direct or extend without a specified coerce type, specify the
+ // default now.
+ ABIArgInfo &RetInfo = FI->getReturnInfo();
+ if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
+ RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
+
+ for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
+ I != E; ++I)
+ if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
+ I->info.setCoerceToType(ConvertTypeRecursive(I->type));
// If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
// types, resolve them now. These pointers may point to this function, which
// we *just* filled in the FunctionInfo for.
- if (!IsRecursive && !PointersToResolve.empty()) {
- // Use PATypeHolder's so that our preferred types don't dangle under
- // refinement.
- llvm::SmallVector<llvm::PATypeHolder, 8> Handles(PreferredArgTypes.begin(),
- PreferredArgTypes.end());
+ if (!IsRecursive && !PointersToResolve.empty())
HandleLateResolvedPointers();
- PreferredArgTypes.clear();
- PreferredArgTypes.append(Handles.begin(), Handles.end());
- }
-
return *FI;
}
@@ -311,11 +314,10 @@ void CodeGenTypes::GetExpandedTypes(QualType Ty,
"Cannot expand structure with bit-field members.");
QualType FT = FD->getType();
- if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ if (CodeGenFunction::hasAggregateLLVMType(FT))
GetExpandedTypes(FT, ArgTys, IsRecursive);
- } else {
+ else
ArgTys.push_back(ConvertType(FT, IsRecursive));
- }
}
}
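GetExpandedTypes flattens an aggregate into one IR argument per scalar field by recursing through nested records. A self-contained toy version of that recursion (hypothetical stand-in types, not Clang's):

    #include <cstddef>
    #include <string>
    #include <vector>

    struct TypeNode {                     // stand-in for QualType
      bool IsAggregate;
      std::string ScalarName;             // valid when !IsAggregate
      std::vector<TypeNode> Fields;       // valid when IsAggregate
    };

    // Aggregates recurse into their fields; scalars become arguments.
    void GetExpandedTypes(const TypeNode &T,
                          std::vector<std::string> &ArgTys) {
      if (T.IsAggregate) {
        for (std::size_t i = 0, e = T.Fields.size(); i != e; ++i)
          GetExpandedTypes(T.Fields[i], ArgTys);
      } else {
        ArgTys.push_back(T.ScalarName);
      }
    }

Fed a value shaped like struct { int x; struct { float a, b; } y; }, this yields three scalars, mirroring how the Expand kind lowers such a struct to (i32, float, float).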
@@ -613,7 +615,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- ResultType = ConvertType(RetTy, IsRecursive);
+ ResultType = RetAI.getCoerceToType();
break;
case ABIArgInfo::Indirect: {
@@ -627,10 +629,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
case ABIArgInfo::Ignore:
ResultType = llvm::Type::getVoidTy(getLLVMContext());
break;
-
- case ABIArgInfo::Coerce:
- ResultType = RetAI.getCoerceToType();
- break;
}
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
@@ -641,7 +639,15 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::Coerce: {
+ case ABIArgInfo::Indirect: {
+ // indirect arguments are always on the stack, which is addr space #0.
+ const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
+ ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
@@ -655,18 +661,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
break;
}
- case ABIArgInfo::Indirect: {
- // indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
- ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
- break;
- }
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- ArgTys.push_back(ConvertType(it->type, IsRecursive));
- break;
-
case ABIArgInfo::Expand:
GetExpandedTypes(it->type, ArgTys, IsRecursive);
break;
@@ -676,12 +670,18 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
-const llvm::Type *
-CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
+const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- if (!VerifyFuncTypeComplete(FPT))
- return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic(), false);
+ if (!VerifyFuncTypeComplete(FPT)) {
+ const CGFunctionInfo *Info;
+ if (isa<CXXDestructorDecl>(MD))
+ Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ else
+ Info = &getFunctionInfo(MD);
+ return GetFunctionType(*Info, FPT->isVariadic(), false);
+ }
return llvm::OpaqueType::get(getLLVMContext());
}
@@ -730,13 +730,13 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
- if (RetTy->isSignedIntegerType()) {
+ if (RetTy->hasSignedIntegerRepresentation())
RetAttrs |= llvm::Attribute::SExt;
- } else if (RetTy->isUnsignedIntegerType()) {
+ else if (RetTy->hasUnsignedIntegerRepresentation())
RetAttrs |= llvm::Attribute::ZExt;
- }
- // FALLTHROUGH
+ break;
case ABIArgInfo::Direct:
+ case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Indirect:
@@ -748,10 +748,6 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
llvm::Attribute::ReadNone);
break;
- case ABIArgInfo::Ignore:
- case ABIArgInfo::Coerce:
- break;
-
case ABIArgInfo::Expand:
assert(0 && "Invalid ABI kind for return argument");
}
@@ -759,7 +755,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (RetAttrs)
PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
- // FIXME: we need to honour command line settings also...
+ // FIXME: we need to honor command line settings also.
// FIXME: RegParm should be reduced in case of nested functions and/or global
// register variable.
signed RegParm = FI.getRegParm();
@@ -774,15 +770,27 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
// sense to do it here because parameters are so fucked up.
-
switch (AI.getKind()) {
- case ABIArgInfo::Coerce:
+ case ABIArgInfo::Extend:
+ if (ParamType->isSignedIntegerType())
+ Attributes |= llvm::Attribute::SExt;
+ else if (ParamType->isUnsignedIntegerType())
+ Attributes |= llvm::Attribute::ZExt;
+ // FALL THROUGH
+ case ABIArgInfo::Direct:
+ if (RegParm > 0 &&
+ (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ RegParm -=
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ if (RegParm >= 0)
+ Attributes |= llvm::Attribute::InReg;
+ }
+ // FIXME: handle sseregparm someday...
+
if (const llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType()))
- Index += STy->getNumElements();
- else
- ++Index;
- continue; // Skip index increment.
+ dyn_cast<llvm::StructType>(AI.getCoerceToType()))
+ Index += STy->getNumElements()-1; // 1 will be added below.
+ break;
case ABIArgInfo::Indirect:
if (AI.getIndirectByVal())
@@ -795,24 +803,6 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
llvm::Attribute::ReadNone);
break;
- case ABIArgInfo::Extend:
- if (ParamType->isSignedIntegerType()) {
- Attributes |= llvm::Attribute::SExt;
- } else if (ParamType->isUnsignedIntegerType()) {
- Attributes |= llvm::Attribute::ZExt;
- }
- // FALLS THROUGH
- case ABIArgInfo::Direct:
- if (RegParm > 0 &&
- (ParamType->isIntegerType() || ParamType->isPointerType())) {
- RegParm -=
- (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
- if (RegParm >= 0)
- Attributes |= llvm::Attribute::InReg;
- }
- // FIXME: handle sseregparm someday...
- break;
-
case ABIArgInfo::Ignore:
// Skip increment, no matching LLVM parameter.
continue;
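The regparm accounting in the Direct/Extend case above charges each integer or pointer argument ceil(size / PointerWidth) register slots and grants the inreg attribute only while the budget stays non-negative. A worked example with illustrative i386 numbers (this sketch models only the arithmetic; the diff additionally requires RegParm > 0 before the decrement):

    #include <cstdio>

    int main() {
      int RegParm = 3;                      // e.g. __attribute__((regparm(3)))
      const unsigned PointerWidth = 32;     // i386 pointer width in bits
      const unsigned ArgBits[] = {32, 64, 32};  // int, long long, int

      for (unsigned i = 0; i != 3; ++i) {
        // Ceiling division: slots consumed by this argument.
        RegParm -= int((ArgBits[i] + PointerWidth - 1) / PointerWidth);
        std::printf("arg%u (%u bits): %s, budget now %d\n", i, ArgBits[i],
                    RegParm >= 0 ? "inreg" : "stack", RegParm);
      }
      return 0;
    }

Here the 64-bit argument burns two of the three slots, so the third argument falls back to the stack.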
@@ -881,7 +871,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// reference.
} else {
// Load scalar value from indirect argument.
- V = EmitLoadOfScalar(V, false, Ty);
+ unsigned Alignment = getContext().getTypeAlignInChars(Ty).getQuantity();
+ V = EmitLoadOfScalar(V, false, Alignment, Ty);
if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
// This must be a promotion, for something like
// "void a(x) short x; {..."
@@ -894,15 +885,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
- llvm::Value *V = AI;
- if (hasAggregateLLVMType(Ty)) {
- // Create a temporary alloca to hold the argument; the rest of
- // codegen expects to access aggregates & complex values by
- // reference.
- V = CreateMemTemp(Ty);
- Builder.CreateStore(AI, V);
- } else {
+ // If we have the trivial case, handle it with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value *V = AI;
+
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
@@ -911,53 +900,36 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// "void a(x) short x; {..."
V = EmitScalarConversion(V, Ty, Arg->getType());
}
+ EmitParmDecl(*Arg, V);
+ break;
}
- EmitParmDecl(*Arg, V);
- break;
- }
-
- case ABIArgInfo::Expand: {
- // If this structure was expanded into multiple arguments then
- // we need to create a temporary and reconstruct it from the
- // arguments.
- llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
- // FIXME: What are the right qualifiers here?
- llvm::Function::arg_iterator End =
- ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
- EmitParmDecl(*Arg, Temp);
-
- // Name the arguments used in expansion and increment AI.
- unsigned Index = 0;
- for (; AI != End; ++AI, ++Index)
- AI->setName(Arg->getName() + "." + llvm::Twine(Index));
- continue;
- }
-
- case ABIArgInfo::Ignore:
- // Initialize the local variable appropriately.
- if (hasAggregateLLVMType(Ty)) {
- EmitParmDecl(*Arg, CreateMemTemp(Ty));
- } else {
- EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
- }
-
- // Skip increment, no matching LLVM parameter.
- continue;
- case ABIArgInfo::Coerce: {
- // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
- // result in a new alloca anyway, so we could just store into that
- // directly if we broke the abstraction down more.
llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
- Alloca->setAlignment(getContext().getDeclAlign(Arg).getQuantity());
+
+ // The alignment we need to use is the max of the requested alignment for
+ // the argument plus the alignment required by our access code below.
+ unsigned AlignmentToUse =
+ CGF.CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ AlignmentToUse = std::max(AlignmentToUse,
+ (unsigned)getContext().getDeclAlign(Arg).getQuantity());
+
+ Alloca->setAlignment(AlignmentToUse);
llvm::Value *V = Alloca;
+ llvm::Value *Ptr = V; // Pointer to store into.
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgI.getDirectOffset()) {
+ Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
+ Ptr = Builder.CreateBitCast(Ptr,
+ llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
+ }
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
if (const llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
- llvm::Value *Ptr = V;
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -970,13 +942,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Simple case, just do a coerced store of the argument into the alloca.
assert(AI != Fn->arg_end() && "Argument mismatch!");
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
+ CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
}
// Match to what EmitParmDecl is expecting for this type.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
- V = EmitLoadOfScalar(V, false, Ty);
+ V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
// This must be a promotion, for something like
// "void a(x) short x; {..."
@@ -986,6 +958,32 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
EmitParmDecl(*Arg, V);
continue; // Skip ++AI increment, already done.
}
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
+ EmitParmDecl(*Arg, Temp);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Arg->getName() + "." + llvm::Twine(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty))
+ EmitParmDecl(*Arg, CreateMemTemp(Ty));
+ else
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
}
++AI;
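Several of the hunks above apply ArgI.getDirectOffset() by bitcasting to i8*, advancing Offs bytes with a constant GEP, and bitcasting to a pointer to the coerce-to type. That is ordinary byte arithmetic on the argument's memory slot; a plain-C++ analogue (hypothetical layout, not the IRBuilder calls):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct ArgSlot {      // hypothetical in-memory layout of the argument
      char Pad[4];        // bytes skipped by the direct offset
      float Payload;      // where the coerce-to type lives
    };

    int main() {
      ArgSlot Slot = {};
      float Incoming = 2.5f;          // the incoming IR argument value

      // i8* bitcast + constant GEP: step Offs bytes into the slot, then
      // reinterpret as the coerce-to type. memcpy plays the role of the
      // coerced store.
      const std::size_t Offs = offsetof(ArgSlot, Payload);
      char *Ptr = reinterpret_cast<char *>(&Slot) + Offs;
      std::memcpy(Ptr, &Incoming, sizeof Incoming);

      std::printf("stored %g at byte offset %zu\n", Slot.Payload, Offs);
      return 0;
    }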
@@ -1000,13 +998,14 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
return;
}
- llvm::MDNode *RetDbgInfo = 0;
+ llvm::DebugLoc RetDbgLoc;
llvm::Value *RV = 0;
QualType RetTy = FI.getReturnType();
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
- case ABIArgInfo::Indirect:
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
if (RetTy->isAnyComplexType()) {
ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
StoreComplexToAddr(RT, CurFn->arg_begin(), false);
@@ -1014,43 +1013,54 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Do nothing; aggregates get evaluated directly into the destination.
} else {
EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
- false, RetTy);
+ false, Alignment, RetTy);
}
break;
+ }
case ABIArgInfo::Extend:
- case ABIArgInfo::Direct: {
- // The internal return value temp always will have pointer-to-return-type
- // type, just do a load.
-
- // If the instruction right before the insertion point is a store to the
- // return value, we can elide the load, zap the store, and usually zap the
- // alloca.
- llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
- llvm::StoreInst *SI = 0;
- if (InsertBB->empty() ||
- !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
- SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
- RV = Builder.CreateLoad(ReturnValue);
+ case ABIArgInfo::Direct:
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ // The internal return value temp always will have pointer-to-return-type
+ // type, just do a load.
+
+ // If the instruction right before the insertion point is a store to the
+ // return value, we can elide the load, zap the store, and usually zap the
+ // alloca.
+ llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
+ llvm::StoreInst *SI = 0;
+ if (InsertBB->empty() ||
+ !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
+ SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
+ RV = Builder.CreateLoad(ReturnValue);
+ } else {
+ // Get the stored value and nuke the now-dead store.
+ RetDbgLoc = SI->getDebugLoc();
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
+ cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
+ ReturnValue = 0;
+ }
+ }
} else {
- // Get the stored value and nuke the now-dead store.
- RetDbgInfo = SI->getDbgMetadata();
- RV = SI->getValueOperand();
- SI->eraseFromParent();
-
- // If that was the only use of the return value, nuke it as well now.
- if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
- cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
- ReturnValue = 0;
+ llvm::Value *V = ReturnValue;
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
+ V = Builder.CreateConstGEP1_32(V, Offs);
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
}
+
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
break;
- }
- case ABIArgInfo::Ignore:
- break;
- case ABIArgInfo::Coerce:
- RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Expand:
@@ -1058,8 +1068,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
}
llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
- if (RetDbgInfo)
- Ret->setDbgMetadata(RetDbgInfo);
+ if (!RetDbgLoc.isUnknown())
+ Ret->setDebugLoc(RetDbgLoc);
}
RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
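The rewritten Direct/Extend epilog keeps the earlier peephole: when the instruction just before the return is a non-volatile store to the return slot, the stored value is reused, the store erased, and the load elided. A toy model of that check (hypothetical instruction record, not llvm::StoreInst):

    #include <vector>

    struct Inst {                   // toy stand-in for llvm::Instruction
      enum Kind { Store, Other } K;
      int *Ptr;                     // store destination
      int Val;                      // stored value
      bool IsVolatile;
    };

    // Produces the function result, eliding the load when the block ends
    // in a non-volatile store to the return slot (and nuking that store).
    int EmitReturn(std::vector<Inst> &Block, int *ReturnSlot) {
      if (!Block.empty()) {
        Inst &Last = Block.back();
        if (Last.K == Inst::Store && Last.Ptr == ReturnSlot &&
            !Last.IsVolatile) {
          int V = Last.Val;         // get the stored value...
          Block.pop_back();         // ...and erase the now-dead store
          return V;
        }
      }
      return *ReturnSlot;           // otherwise, just load the slot
    }

In the real code the same path also salvages the store's debug location (the new llvm::DebugLoc logic) so the ret instruction keeps a sensible source position.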
@@ -1089,7 +1099,8 @@ RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
if (hasAggregateLLVMType(ArgType))
return RValue::getAggregate(Local);
- return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
+ unsigned Alignment = getContext().getDeclAlign(Param).getQuantity();
+ return RValue::get(EmitLoadOfScalar(Local, false, Alignment, ArgType));
}
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
@@ -1149,49 +1160,60 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->first;
+ unsigned Alignment =
+ getContext().getTypeAlignInChars(I->second).getQuantity();
switch (ArgInfo.getKind()) {
- case ABIArgInfo::Indirect:
+ case ABIArgInfo::Indirect: {
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
Args.push_back(CreateMemTemp(I->second));
if (RV.isScalar())
- EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
+ EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
+ Alignment, I->second);
else
StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
} else {
Args.push_back(RV.getAggregateAddr());
}
break;
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- if (RV.isScalar()) {
- Args.push_back(RV.getScalarVal());
- } else if (RV.isComplex()) {
- llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
- Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
- Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
- Args.push_back(Tmp);
- } else {
- Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
- }
- break;
+ }
case ABIArgInfo::Ignore:
break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
+ ArgInfo.getDirectOffset() == 0) {
+ if (RV.isScalar())
+ Args.push_back(RV.getScalarVal());
+ else
+ Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ break;
+ }
- case ABIArgInfo::Coerce: {
// FIXME: Avoid the conversion through memory if possible.
llvm::Value *SrcPtr;
if (RV.isScalar()) {
SrcPtr = CreateMemTemp(I->second, "coerce");
- EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment,
+ I->second);
} else if (RV.isComplex()) {
SrcPtr = CreateMemTemp(I->second, "coerce");
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
} else
SrcPtr = RV.getAggregateAddr();
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgInfo.getDirectOffset()) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
+ SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
+
+ }
+
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
@@ -1201,7 +1223,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::PointerType::getUnqual(STy));
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
- Args.push_back(Builder.CreateLoad(EltPtr));
+ llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
+ // We don't know what we're loading from.
+ LI->setAlignment(1);
+ Args.push_back(LI);
}
} else {
// In the simple case, just pass the coerced loaded value.
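When the coerce-to type is a first-class aggregate, the call path above flattens it: each struct element is loaded separately and passed as its own scalar, and because those element pointers point into arbitrary caller memory the loads are conservatively marked align 1. A loose plain-C++ analogue of that decomposition (hypothetical two-field coerce type; memcpy stands in for the unaligned loads):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct CoerceTy { int A; int B; };   // hypothetical { i32, i32 } type

    void CalleeFlattened(int A, int B) { // the FCA passed as two scalars
      std::printf("%d %d\n", A, B);
    }

    int main() {
      unsigned char Memory[sizeof(CoerceTy)];  // treated as unaligned
      CoerceTy Src = {1, 2};
      std::memcpy(Memory, &Src, sizeof Src);

      // Per-element loads, like CreateConstGEP2_32 + CreateLoad above.
      int A, B;
      std::memcpy(&A, Memory + offsetof(CoerceTy, A), sizeof A);
      std::memcpy(&B, Memory + offsetof(CoerceTy, B), sizeof B);
      CalleeFlattened(A, B);
      return 0;
    }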
@@ -1294,39 +1319,43 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CI->setName("call");
switch (RetAI.getKind()) {
- case ABIArgInfo::Indirect:
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
if (RetTy->isAnyComplexType())
return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
if (CodeGenFunction::hasAggregateLLVMType(RetTy))
return RValue::getAggregate(Args[0]);
- return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- if (RetTy->isAnyComplexType()) {
- llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
- llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
- return RValue::getComplex(std::make_pair(Real, Imag));
- }
- if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
-
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "agg.tmp");
- DestIsVolatile = false;
- }
- Builder.CreateStore(CI, DestPtr, DestIsVolatile);
- return RValue::getAggregate(DestPtr);
- }
- return RValue::get(CI);
+ return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
+ }
case ABIArgInfo::Ignore:
// If we are ignoring an argument that had a result, make sure to
// construct the appropriate return value for our caller.
return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
- case ABIArgInfo::Coerce: {
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+ return RValue::getAggregate(DestPtr);
+ }
+ return RValue::get(CI);
+ }
+
llvm::Value *DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
@@ -1335,12 +1364,22 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
DestIsVolatile = false;
}
- CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
if (RetTy->isAnyComplexType())
return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
if (CodeGenFunction::hasAggregateLLVMType(RetTy))
return RValue::getAggregate(DestPtr);
- return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
+ return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
}
case ABIArgInfo::Expand: