path: root/lib/CodeGen/TargetInfo.cpp
author     dim <dim@FreeBSD.org>  2012-08-15 20:02:54 +0000
committer  dim <dim@FreeBSD.org>  2012-08-15 20:02:54 +0000
commit     554bcb69c2d785a011a30e7db87a36a87fe7db10 (patch)
tree       9abb1a658a297776086f4e0dfa6ca533de02104e /lib/CodeGen/TargetInfo.cpp
parent     bb67ca86b31f67faee50bd10c3b036d65751745a (diff)
Vendor import of clang trunk r161861:
http://llvm.org/svn/llvm-project/cfe/trunk@161861
Diffstat (limited to 'lib/CodeGen/TargetInfo.cpp')
-rw-r--r--  lib/CodeGen/TargetInfo.cpp  383
1 file changed, 272 insertions(+), 111 deletions(-)
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 2b71fdd..9c23ed9 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -413,12 +413,18 @@ static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
+ enum Class {
+ Integer,
+ Float
+ };
+
static const unsigned MinABIStackAlignInBytes = 4;
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
bool IsMMXDisabled;
bool IsWin32FloatStructABI;
+ unsigned DefaultNumRegisterParameters;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
@@ -434,33 +440,31 @@ class X86_32ABIInfo : public ABIInfo {
/// \brief Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
-public:
-
- ABIArgInfo classifyReturnType(QualType RetTy,
+ Class classify(QualType Ty) const;
+ ABIArgInfo classifyReturnType(QualType RetTy,
unsigned callingConvention) const;
+ ABIArgInfo classifyArgumentTypeWithReg(QualType RetTy,
+ unsigned &FreeRegs) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
- virtual void computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
- FI.getCallingConvention());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
- }
+public:
+ virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w)
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
+ unsigned r)
: ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
- IsMMXDisabled(m), IsWin32FloatStructABI(w) {}
+ IsMMXDisabled(m), IsWin32FloatStructABI(w),
+ DefaultNumRegisterParameters(r) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- bool d, bool p, bool m, bool w)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {}
+ bool d, bool p, bool m, bool w, unsigned r)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
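
The new r parameter threads the default register-parameter count (the driver's -mregparm setting) into the 32-bit x86 ABI object. A minimal sketch of what this controls, assuming GCC-style regparm semantics on i386 (the function is hypothetical):

    // With -mregparm=3 (or the attribute below), the first integer-sized
    // arguments travel in EAX, EDX and ECX instead of on the stack.
    int __attribute__((regparm(3))) add3(int a, int b, int c) {
      return a + b + c;  // a in %eax, b in %edx, c in %ecx
    }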
@@ -626,6 +630,10 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
+ return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
+}
+
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
@@ -643,7 +651,7 @@ static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
i != e; ++i) {
QualType FT = i->getType();
- if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
+ if (isSSEVectorType(Context, FT))
return true;
if (isRecordWithSSEVectorType(Context, FT))
@@ -667,7 +675,8 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
}
// Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
+ if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
+ isRecordWithSSEVectorType(getContext(), Ty)))
return 16;
return MinABIStackAlignInBytes;
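
Factoring the 128-bit vector check out into isSSEVectorType means a bare SSE vector, not only a record that contains one, now triggers the 16-byte stack alignment. A hedged illustration (the struct and prototypes are hypothetical):

    #include <xmmintrin.h>
    struct S { __m128 v; };   // caught before, via isRecordWithSSEVectorType
    void f(struct S s);
    void g(__m128 v);         // now also caught, via the new isSSEVectorType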
@@ -692,6 +701,57 @@ ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
return ABIArgInfo::getIndirect(StackAlign);
}
+X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
+ const Type *T = isSingleElementStruct(Ty, getContext());
+ if (!T)
+ T = Ty.getTypePtr();
+
+ if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+ BuiltinType::Kind K = BT->getKind();
+ if (K == BuiltinType::Float || K == BuiltinType::Double)
+ return Float;
+ }
+ return Integer;
+}
+
+ABIArgInfo
+X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
+ unsigned &FreeRegs) const {
+ // Common case first.
+ if (FreeRegs == 0)
+ return classifyArgumentType(Ty);
+
+ Class C = classify(Ty);
+ if (C == Float)
+ return classifyArgumentType(Ty);
+
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (SizeInRegs == 0)
+ return classifyArgumentType(Ty);
+
+ if (SizeInRegs > FreeRegs) {
+ FreeRegs = 0;
+ return classifyArgumentType(Ty);
+ }
+ assert(SizeInRegs >= 1 && SizeInRegs <= 3);
+ FreeRegs -= SizeInRegs;
+
+ // If it is a simple scalar, keep the type so that we produce a cleaner IR.
+ ABIArgInfo Foo = classifyArgumentType(Ty);
+ if (Foo.isDirect() && !Foo.getDirectOffset() && !Foo.getPaddingType())
+ return ABIArgInfo::getDirectInReg(Foo.getCoerceToType());
+ if (Foo.isExtend())
+ return ABIArgInfo::getExtendInReg(Foo.getCoerceToType());
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+ llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ SmallVector<llvm::Type*, 3> Elements;
+ for (unsigned I = 0; I < SizeInRegs; ++I)
+ Elements.push_back(Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+ return ABIArgInfo::getDirectInReg(Result);
+}
+
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
// FIXME: Set alignment on indirect arguments.
if (isAggregateTypeForABI(Ty)) {
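
A worked sketch of the register-allocation rule in classifyArgumentTypeWithReg above, assuming -mregparm=3 and the hypothetical types below:

    struct P { int x, y; };     // SizeInRegs = (64 + 31) / 32 = 2
    void h(struct P p, int z);
    // p is coerced to an in-register {i32, i32} and consumes two of the
    // three free registers; z takes the third. A float argument would
    // instead fall through to plain classifyArgumentType, since classify()
    // returns Float and FP values never draw from the integer regparm pool.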
@@ -753,6 +813,28 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ FI.getCallingConvention());
+
+ unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
+ DefaultNumRegisterParameters;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect() && FreeRegs) {
+ --FreeRegs;
+ ABIArgInfo &Old = FI.getReturnInfo();
+ Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
+ Old.getIndirectByVal(),
+ Old.getIndirectRealign());
+ }
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
+}
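
Note the sret interaction in computeInfo: when the return value goes through a hidden pointer, that pointer is itself promoted to a register and debits FreeRegs before any visible argument is classified. A hedged example (types and names are illustrative):

    struct Big { int a[4]; };                  // returned indirectly
    struct Big __attribute__((regparm(2))) make(int seed);
    // the hidden sret pointer takes the first register; seed takes the second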
+
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BPP = CGF.Int8PtrPtrTy;
@@ -1345,7 +1427,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// single eightbyte, each is classified separately. Each eightbyte gets
// initialized to class NO_CLASS.
Class FieldLo, FieldHi;
- uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
+ uint64_t Offset =
+ OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
@@ -1584,7 +1667,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
// If the base is after the span we care about, ignore it.
- unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
+ unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
if (BaseOffset >= EndBit) continue;
unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
@@ -2411,6 +2494,64 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return false;
}
+// PowerPC-64
+
+namespace {
+class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Eight8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
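
The register numbers above feed clang's __builtin_init_dwarf_reg_size_table, which unwinder code uses to learn the size of each DWARF register. A hedged usage sketch (the buffer size and function are illustrative):

    unsigned char sizes[114];
    void init_sizes(void) {
      __builtin_init_dwarf_reg_size_table(sizes);
      // afterwards: sizes[0..63] == 8 (GPRs/FPRs), sizes[64..76] == 4,
      // sizes[77..108] == 16 (vector regs), sizes[109..113] == 4
    }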
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
@@ -2559,7 +2700,8 @@ static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
// double, or 64-bit or 128-bit vectors.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->getKind() != BuiltinType::Float &&
- BT->getKind() != BuiltinType::Double)
+ BT->getKind() != BuiltinType::Double &&
+ BT->getKind() != BuiltinType::LongDouble)
return false;
} else if (const VectorType *VT = Ty->getAs<VectorType>()) {
unsigned VecSize = Context.getTypeSize(VT);
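
Accepting LongDouble here matters on AAPCS targets, where long double has the same 64-bit representation as double, so a struct of long doubles can now qualify as a homogeneous aggregate. A hypothetical candidate:

    struct HA { long double a, b; };  // homogeneous aggregate of two doubles
    void take(struct HA h);           // eligible for VFP register passing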
@@ -2615,19 +2757,23 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
}
}
+ // Support byval for ARM.
+ if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) ||
+ getContext().getTypeAlign(Ty) > 64) {
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ }
+
// Otherwise, pass by coercing to a structure of the appropriate size.
- //
- // FIXME: This is kind of nasty... but there isn't much choice because the ARM
- // backend doesn't support byval.
- // FIXME: This doesn't handle alignment > 64 bits.
llvm::Type* ElemTy;
unsigned SizeRegs;
- if (getContext().getTypeAlign(Ty) > 32) {
- ElemTy = llvm::Type::getInt64Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
- } else {
+ // FIXME: Try to match the types of the arguments more accurately where
+ // we can.
+ if (getContext().getTypeAlign(Ty) <= 32) {
ElemTy = llvm::Type::getInt32Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ } else {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
llvm::Type *STy =
@@ -2833,14 +2979,14 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
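
With byval now supported by the ARM backend, the cutover in classifyArgumentType can be sketched as follows (sizes assume a 32-bit ARM target; the struct names are hypothetical):

    struct Small { int a[4]; };   // 16 bytes: coerced to [4 x i32]
    struct Large { int a[32]; };  // 128 bytes (> 64): passed indirect, byval
    struct Over  { int a[2]; } __attribute__((aligned(16)));
                                  // alignment 128 bits (> 64): also byval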
//===----------------------------------------------------------------------===//
-// PTX ABI Implementation
+// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
-class PTXABIInfo : public ABIInfo {
+class NVPTXABIInfo : public ABIInfo {
public:
- PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+ NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
@@ -2850,16 +2996,16 @@ public:
CodeGenFunction &CFG) const;
};
-class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
+class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PTXTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
+ NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const;
};
-ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
+ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy))
@@ -2867,14 +3013,14 @@ ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect();
}
-ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty))
return ABIArgInfo::getIndirect(0);
return ABIArgInfo::getDirect();
}
-void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
@@ -2885,6 +3031,8 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
return;
// Calling convention as default by an ABI.
+ // We're still using the PTX_Kernel/PTX_Device calling conventions here,
+ // but we should switch to NVVM metadata later on.
llvm::CallingConv::ID DefaultCC;
const LangOptions &LangOpts = getContext().getLangOpts();
if (LangOpts.OpenCL || LangOpts.CUDA) {
@@ -2903,14 +3051,14 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
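
A hedged sketch of the default conventions chosen in computeInfo for CUDA mode (the declarations are illustrative): ordinary device-side functions default to PTX_Device, while kernel entry points are marked PTX_Kernel, pending the move to NVVM metadata noted above.

    __global__ void kernel(float *out);  // llvm::CallingConv::PTX_Kernel
    __device__ float helper(float x);    // llvm::CallingConv::PTX_Device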
-llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CFG) const {
- llvm_unreachable("PTX does not support varargs");
+llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CFG) const {
+ llvm_unreachable("NVPTX does not support varargs");
}
-void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
- llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const{
+void NVPTXTargetCodeGenInfo::
+SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const{
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD) return;
@@ -3097,13 +3245,16 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
namespace {
class MipsABIInfo : public ABIInfo {
bool IsO32;
- unsigned MinABIStackAlignInBytes;
- llvm::Type* HandleAggregates(QualType Ty) const;
+ unsigned MinABIStackAlignInBytes, StackAlignInBytes;
+ void CoerceToIntArgs(uint64_t TySize,
+ SmallVector<llvm::Type*, 8> &ArgList) const;
+ llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
- ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {}
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
+ StackAlignInBytes(IsO32 ? 8 : 16) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
@@ -3132,36 +3283,56 @@ public:
};
}
+void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
+ SmallVector<llvm::Type*, 8> &ArgList) const {
+ llvm::IntegerType *IntTy =
+ llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
+
+ // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
+ for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
+ ArgList.push_back(IntTy);
+
+ // If necessary, add one more integer type to ArgList.
+ unsigned R = TySize % (MinABIStackAlignInBytes * 8);
+
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+}
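
A worked example of CoerceToIntArgs under O32 (MinABIStackAlignInBytes = 4, so IntTy is i32), for a hypothetical 88-bit type:

    // TySize = 88, MinABIStackAlignInBytes * 8 = 32
    //   88 / 32 = 2  -> push i32, i32
    //   88 % 32 = 24 -> push i24
    // resulting coercion struct: { i32, i32, i24 }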
+
// In N32/64, an aligned double precision floating point field is passed in
// a register.
-llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
- if (IsO32)
- return 0;
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
+ SmallVector<llvm::Type*, 8> ArgList, IntArgList;
+
+ if (IsO32) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
if (Ty->isComplexType())
return CGT.ConvertType(Ty);
const RecordType *RT = Ty->getAs<RecordType>();
- // Unions are passed in integer registers.
- if (!RT || !RT->isStructureOrClassType())
- return 0;
+ // Unions/vectors are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType()) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
const RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- uint64_t StructSize = getContext().getTypeSize(Ty);
- assert(!(StructSize % 8) && "Size of structure must be multiple of 8.");
+ assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
uint64_t LastOffset = 0;
unsigned idx = 0;
llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
- SmallVector<llvm::Type*, 8> ArgList;
// Iterate over fields in the struct/class and check if there are any aligned
// double fields.
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i, ++idx) {
- const QualType Ty = (*i)->getType();
+ const QualType Ty = i->getType();
const BuiltinType *BT = Ty->getAs<BuiltinType>();
if (!BT || BT->getKind() != BuiltinType::Double)
@@ -3180,43 +3351,33 @@ llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
LastOffset = Offset + 64;
}
- // This struct/class doesn't have an aligned double field.
- if (!LastOffset)
- return 0;
-
- // Add ((StructSize - LastOffset) / 64) args of type i64.
- for (unsigned N = (StructSize - LastOffset) / 64; N; --N)
- ArgList.push_back(I64);
-
- // If the size of the remainder is not zero, add one more integer type to
- // ArgList.
- unsigned R = (StructSize - LastOffset) % 64;
- if (R)
- ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+ CoerceToIntArgs(TySize - LastOffset, IntArgList);
+ ArgList.append(IntArgList.begin(), IntArgList.end());
return llvm::StructType::get(getVMContext(), ArgList);
}
llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
- // Padding is inserted only for N32/64.
- if (IsO32)
- return 0;
+ assert((Offset % MinABIStackAlignInBytes) == 0);
+
+ if ((Align - 1) & Offset)
+ return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
- assert(Align <= 16 && "Alignment larger than 16 not handled.");
- return (Align == 16 && Offset & 0xf) ?
- llvm::IntegerType::get(getVMContext(), 64) : 0;
+ return 0;
}
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
uint64_t OrigOffset = Offset;
- uint64_t TySize =
- llvm::RoundUpToAlignment(getContext().getTypeSize(Ty), 64) / 8;
+ uint64_t TySize = getContext().getTypeSize(Ty);
uint64_t Align = getContext().getTypeAlign(Ty) / 8;
- Offset = llvm::RoundUpToAlignment(Offset, std::max(Align, (uint64_t)8));
- Offset += TySize;
- if (isAggregateTypeForABI(Ty)) {
+ Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
+ (uint64_t)StackAlignInBytes);
+ Offset = llvm::RoundUpToAlignment(Offset, Align);
+ Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
+
+ if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
// Ignore empty aggregates.
if (TySize == 0)
return ABIArgInfo::getIgnore();
@@ -3224,20 +3385,15 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
// Records with non trivial destructors/constructors should not be passed
// by value.
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
- Offset = OrigOffset + 8;
+ Offset = OrigOffset + MinABIStackAlignInBytes;
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
- // If we have reached here, aggregates are passed either indirectly via a
- // byval pointer or directly by coercing to another structure type. In the
- // latter case, padding is inserted if the offset of the aggregate is
- // unaligned.
- llvm::Type *ResType = HandleAggregates(Ty);
-
- if (!ResType)
- return ABIArgInfo::getIndirect(0);
-
- return ABIArgInfo::getDirect(ResType, 0, getPaddingType(Align, OrigOffset));
+ // If we have reached here, aggregates are passed directly by coercing to
+ // another structure type. Padding is inserted if the offset of the
+ // aggregate is unaligned.
+ return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
+ getPaddingType(Align, OrigOffset));
}
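
A worked pass through the new offset bookkeeping, assuming O32 (MinABIStackAlignInBytes = 4, StackAlignInBytes = 8) and a hypothetical 12-byte struct with 4-byte natural alignment:

    // Align  = min(max(4, 4), 8)                        = 4 bytes
    // Offset = RoundUpToAlignment(Offset, 4)              (no-op if aligned)
    // Offset += RoundUpToAlignment(96 bits, 32) / 8     = 12 bytes consumed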
// Treat an enum type as its underlying type.
@@ -3253,7 +3409,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
const RecordType *RT = RetTy->getAs<RecordType>();
- SmallVector<llvm::Type*, 2> RTList;
+ SmallVector<llvm::Type*, 8> RTList;
if (RT && RT->isStructureOrClassType()) {
const RecordDecl *RD = RT->getDecl();
@@ -3272,12 +3428,12 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
for (; b != e; ++b) {
- const BuiltinType *BT = (*b)->getType()->getAs<BuiltinType>();
+ const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
if (!BT || !BT->isFloatingPoint())
break;
- RTList.push_back(CGT.ConvertType((*b)->getType()));
+ RTList.push_back(CGT.ConvertType(b->getType()));
}
if (b == e)
@@ -3288,11 +3444,7 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
}
}
- RTList.push_back(llvm::IntegerType::get(getVMContext(),
- std::min(Size, (uint64_t)64)));
- if (Size > 64)
- RTList.push_back(llvm::IntegerType::get(getVMContext(), Size - 64));
-
+ CoerceToIntArgs(Size, RTList);
return llvm::StructType::get(getVMContext(), RTList);
}
@@ -3302,11 +3454,15 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType() || Size == 0)
return ABIArgInfo::getIgnore();
- if (isAggregateTypeForABI(RetTy)) {
+ if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
if (Size <= 128) {
if (RetTy->isAnyComplexType())
return ABIArgInfo::getDirect();
+ // O32 returns integer vectors in registers.
+ if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
+ return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+
if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
}
@@ -3327,7 +3483,7 @@ void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
RetInfo = classifyReturnType(FI.getReturnType());
// Check if a pointer to an aggregate is passed as a hidden argument.
- uint64_t Offset = RetInfo.isIndirect() ? 8 : 0;
+ uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
@@ -3634,10 +3790,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
+ case llvm::Triple::ppc64:
+ return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
- case llvm::Triple::ptx32:
- case llvm::Triple::ptx64:
- return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
case llvm::Triple::mblaze:
return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
@@ -3653,8 +3811,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, true, true, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false,
+ CodeGenOpts.NumRegisterParameters));
switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
@@ -3663,19 +3821,22 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
+ case llvm::Triple::Bitrig:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, true, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX,
+ false,
+ CodeGenOpts.NumRegisterParameters));
case llvm::Triple::Win32:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, true, DisableMMX, true));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true,
+ CodeGenOpts.NumRegisterParameters));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(
- Types, false, false, DisableMMX, false));
+ new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX,
+ false,
+ CodeGenOpts.NumRegisterParameters));
}
}
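
Downstream, the new constructor argument is populated from CodeGenOpts.NumRegisterParameters, which the driver derives from -mregparm. A hedged invocation example:

    // clang -target i386-unknown-freebsd -mregparm 3 -S foo.c
    //   -> DefaultNumRegisterParameters = 3 for every function in foo.c,
    //      unless overridden per-function by __attribute__((regparm(N)))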