Diffstat (limited to 'lib/CodeGen/TargetInfo.cpp')
-rw-r--r--  lib/CodeGen/TargetInfo.cpp  274
1 file changed, 161 insertions(+), 113 deletions(-)
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 043ead7..df2c1bd 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -75,7 +75,7 @@ void ABIArgInfo::dump() const {
break;
case Indirect:
OS << "Indirect Align=" << getIndirectAlign()
- << " Byal=" << getIndirectByVal()
+ << " ByVal=" << getIndirectByVal()
<< " Realign=" << getIndirectRealign();
break;
case Expand:
@@ -356,9 +356,9 @@ bool UseX86_MMXType(const llvm::Type *IRType) {
IRType->getScalarSizeInBits() != 64;
}
-static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) {
+static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) {
if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
return Ty;
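
The adjustment above is easiest to see from the source side. A minimal sketch (function name and typedef are illustrative, not from the patch) of inline asm whose vector operands this hook rewrites to the opaque x86_mmx IR type:

    typedef int v2si __attribute__((vector_size(8)));
    v2si add_pairs(v2si a, v2si b) {
      v2si r;
      // "y" pins the operands to MMX registers; the hook above swaps the
      // operands' IR type from <2 x i32> to x86_mmx.
      __asm__("movq %1, %0\n\tpaddd %2, %0" : "=y"(r) : "y"(a), "y"(b));
      return r;
    }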
@@ -374,6 +374,7 @@ class X86_32ABIInfo : public ABIInfo {
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
+ bool IsMMXDisabled;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
@@ -403,14 +404,15 @@ public:
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
- : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
+ : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
+ IsMMXDisabled(m) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
@@ -425,9 +427,9 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const;
- const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) const {
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) const {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
@@ -562,7 +564,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
} else if (SeltTy->isPointerType()) {
// FIXME: It would be really nice if this could come out as the proper
// pointer type.
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
+ llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
return ABIArgInfo::getDirect(PtrTy);
} else if (SeltTy->isVectorType()) {
// 64- and 128-bit vectors are never returned in a
@@ -699,8 +701,11 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
Size));
}
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+ llvm::Type *IRType = CGT.ConvertType(Ty);
if (UseX86_MMXType(IRType)) {
+ if (IsMMXDisabled)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ 64));
ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
return AAI;
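
A hedged illustration of the two lowerings this branch selects between on x86-32 (typedef and IR signatures are illustrative approximations):

    typedef short v4hi __attribute__((vector_size(8)));
    void take(v4hi v);
    // IsMMXDisabled:  roughly  declare void @take(i64)      -- plain integer coercion
    // MMX available:  roughly  declare void @take(x86_mmx)  -- passed as an MMX value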
@@ -820,6 +825,22 @@ class X86_64ABIInfo : public ABIInfo {
/// should just return Memory for the aggregate).
static Class merge(Class Accum, Class Field);
+ /// postMerge - Implement the X86_64 ABI post merging algorithm.
+ ///
+ /// Post-merger cleanup: reduces a malformed Hi and Lo pair to the
+ /// final MEMORY or SSE classes when necessary.
+ ///
+ /// \param AggregateSize - The size of the current aggregate in
+ /// the classification process.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the higher words of the containing object.
+ ///
+ void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
+
/// classify - Determine the x86_64 register classes in which the
/// given type T should be passed.
///
@@ -843,13 +864,13 @@ class X86_64ABIInfo : public ABIInfo {
/// also be ComplexX87.
void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
- const llvm::Type *Get16ByteVectorType(QualType Ty) const;
- const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
- const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
+ llvm::Type *GetByteVectorType(QualType Ty) const;
+ llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
/// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be returned in memory.
@@ -921,9 +942,9 @@ public:
return false;
}
- const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) const {
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) const {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
@@ -956,6 +977,39 @@ public:
}
+void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
+ Class &Hi) const {
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is Memory, the whole argument is passed in
+ // memory.
+ //
+ // (b) If X87UP is not preceded by X87, the whole argument is passed in
+ // memory.
+ //
+ // (c) If the size of the aggregate exceeds two eightbytes and the first
+ // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+ // argument is passed in memory. NOTE: This is necessary to keep the
+ // ABI working for processors that don't support the __m256 type.
+ //
+ // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+ //
+ // Some of these are enforced by the merging logic. Others can arise
+ // only with unions; for example:
+ // union { _Complex double; unsigned; }
+ //
+ // Note that clauses (b) and (c) were added in 0.98.
+ //
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+ Lo = Memory;
+ if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+}
+
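A hedged sketch of clause (c) in action, expanding on the __m256 NOTE above (typedef illustrative; __m256 itself behaves the same):

    typedef float v8sf __attribute__((vector_size(32)));   // __m256-like
    struct InYmm { v8sf v; };       // eightbytes classify SSE, SSEUp, ... -> stays in a YMM register
    struct InMem { double d[4]; };  // also 256 bits, but not an SSE/SSEUp chain -> demoted to MEMORY
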
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
// AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
// classified recursively so that always two fields are
@@ -1082,7 +1136,14 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// split.
if (OffsetBase && OffsetBase != 64)
Hi = Lo;
- } else if (Size == 128) {
+ } else if (Size == 128 || Size == 256) {
+ // Arguments of 256 bits are split into four eightbyte chunks. The
+ // least significant one belongs to class SSE and all the others to class
+ // SSEUP. The original Lo and Hi design considers that types can't be
+ // greater than 128 bits, so a 64-bit split in Hi and Lo makes sense.
+ // This design isn't correct for 256 bits, but since there are no cases
+ // where the upper parts would need to be inspected, avoid adding
+ // complexity and just consider Hi to cover the part from bit 64 to bit 256.
Lo = SSE;
Hi = SSEUp;
}
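
Concretely (typedef illustrative, assuming an AVX-capable target), a bare 256-bit vector argument now classifies like a 128-bit one, with the upper chunks in SSEUp:

    typedef double v4df __attribute__((vector_size(32)));
    void f(v4df x);  // Lo = SSE, Hi = SSEUp; lowered to <4 x double> and passed in %ymm0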
@@ -1121,8 +1182,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
return;
// AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
@@ -1137,6 +1198,13 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Current = NoClass;
uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
uint64_t ArraySize = AT->getSize().getZExtValue();
+
+ // The only case a 256-bit wide vector could be used is when the array
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, check early and fall back to memory.
+ if (Size > 128 && EltSize != 256)
+ return;
+
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
Class FieldLo, FieldHi;
classify(AT->getElementType(), Offset, FieldLo, FieldHi);
@@ -1146,9 +1214,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
break;
}
- // Do post merger cleanup (see below). Only case we worry about is Memory.
- if (Hi == Memory)
- Lo = Memory;
+ postMerge(Size, Lo, Hi);
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
return;
}
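
A hedged pair of examples for the array check above (typedef illustrative):

    typedef float v8sf __attribute__((vector_size(32)));
    struct A { v8sf v[1]; };   // single 256-bit element: classification proceeds (SSE + SSEUp)
    struct B { float f[8]; };  // 256 bits of small elements: bails out here and lands in MEMORY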
@@ -1157,8 +1223,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
return;
// AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
@@ -1209,9 +1275,17 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
bool BitField = i->isBitField();
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
+ // four eightbytes, or it contains unaligned fields, it has class MEMORY.
+ //
+ // The only case a 256-bit wide vector could be used is when the struct
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, check early and fall back to memory.
//
+ if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
+ Lo = Memory;
+ return;
+ }
// Note, skip this test for bit-fields, see below.
if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
Lo = Memory;
@@ -1257,31 +1331,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
break;
}
- // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
- //
- // (a) If one of the classes is MEMORY, the whole argument is
- // passed in memory.
- //
- // (b) If X87UP is not preceded by X87, the whole argument is
- // passed in memory.
- //
- // (c) If the size of the aggregate exceeds two eightbytes and the first
- // eight-byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole
- // argument is passed in memory.
- //
- // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
- //
- // Some of these are enforced by the merging logic. Others can arise
- // only with unions; for example:
- // union { _Complex double; unsigned; }
- //
- // Note that clauses (b) and (c) were added in 0.98.
- if (Hi == Memory)
- Lo = Memory;
- if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
- Lo = Memory;
- if (Hi == SSEUp && Lo != SSE)
- Hi = SSE;
+ postMerge(Size, Lo, Hi);
}
}
@@ -1321,24 +1371,25 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
return ABIArgInfo::getIndirect(Align);
}
-/// Get16ByteVectorType - The ABI specifies that a value should be passed in an
-/// full vector XMM register. Pick an LLVM IR type that will be passed as a
+/// GetByteVectorType - The ABI specifies that a value should be passed in a
+/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
-const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
// Wrapper structs that just contain vectors are passed just like vectors,
// strip them off if present.
- const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
while (STy && STy->getNumElements() == 1) {
IRType = STy->getElementType(0);
STy = dyn_cast<llvm::StructType>(IRType);
}
// If the preferred type is a 16-byte vector, prefer to pass it.
- if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
- const llvm::Type *EltTy = VT->getElementType();
- if (VT->getBitWidth() == 128 &&
+ if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
+ llvm::Type *EltTy = VT->getElementType();
+ unsigned BitWidth = VT->getBitWidth();
+ if ((BitWidth == 128 || BitWidth == 256) &&
(EltTy->isFloatTy() || EltTy->isDoubleTy() ||
EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
@@ -1466,8 +1517,8 @@ static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
-const llvm::Type *X86_64ABIInfo::
-GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
QualType SourceTy, unsigned SourceOffset) const {
// The only three choices we have are either double, <2 x float>, or float. We
// pass as float if the last 4 bytes is just padding. This happens for
@@ -1501,8 +1552,8 @@ GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// SourceTy is the source level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
-const llvm::Type *X86_64ABIInfo::
-GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
QualType SourceTy, unsigned SourceOffset) const {
// If we're dealing with an un-offset LLVM IR type, then it means that we're
// returning an 8-byte unit starting with it. See if we can safely use it.
@@ -1540,7 +1591,7 @@ GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
}
if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- const llvm::Type *EltTy = ATy->getElementType();
+ llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
unsigned EltOffset = IROffset/EltSize*EltSize;
return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
@@ -1566,8 +1617,8 @@ GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
-static const llvm::Type *
-GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
+static llvm::Type *
+GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
const llvm::TargetData &TD) {
// In order to correctly satisfy the ABI, we need the high part to start
// at offset 8. If the high and low parts we inferred are both 4-byte types
@@ -1594,8 +1645,7 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
}
}
- const llvm::StructType *Result =
- llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
+ llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
// Verify that the second element is at an 8-byte offset.
@@ -1615,7 +1665,7 @@ classifyReturnType(QualType RetTy) const {
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
- const llvm::Type *ResType = 0;
+ llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
if (Hi == NoClass)
@@ -1638,8 +1688,7 @@ classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
case Integer:
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0,
- RetTy, 0);
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
// If we have a sign or zero extended integer, make sure to return Extend
// so that the parameter gets the right LLVM IR attributes.
@@ -1657,7 +1706,7 @@ classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
// available SSE register of the sequence %xmm0, %xmm1 is used.
case SSE:
- ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0);
+ ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
break;
// AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
@@ -1671,14 +1720,13 @@ classifyReturnType(QualType RetTy) const {
// %st1.
case ComplexX87:
assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(getVMContext(),
- llvm::Type::getX86_FP80Ty(getVMContext()),
+ ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
llvm::Type::getX86_FP80Ty(getVMContext()),
NULL);
break;
}
- const llvm::Type *HighPart = 0;
+ llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously and X87 should
// never occur as a hi class.
@@ -1691,24 +1739,24 @@ classifyReturnType(QualType RetTy) const {
break;
case Integer:
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
- 8, RetTy, 8);
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
- // is passed in the upper half of the last used SSE register.
+ // is passed in the next available eightbyte chunk of the last used
+ // vector register.
//
// SSEUP should always be preceded by SSE, just widen.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = Get16ByteVectorType(RetTy);
+ ResType = GetByteVectorType(RetTy);
break;
// AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
@@ -1719,8 +1767,7 @@ classifyReturnType(QualType RetTy) const {
// preceded by X87. In such situations we follow gcc and pass the
// extra bits in an SSE reg.
if (Lo != X87) {
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
- 8, RetTy, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
}
@@ -1748,7 +1795,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
neededInt = 0;
neededSSE = 0;
- const llvm::Type *ResType = 0;
+ llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
if (Hi == NoClass)
@@ -1767,6 +1814,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// COMPLEX_X87, it is passed in memory.
case X87:
case ComplexX87:
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ ++neededInt;
return getIndirectResult(Ty);
case SSEUp:
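
The ++neededInt added above accounts for the hidden pointer of an argument passed by invisible reference (assuming the Memory case shares this fall-through; only the X87/ComplexX87 labels are visible in this hunk). A hedged sketch with illustrative names:

    struct Guarded {
      long double v;
      Guarded(const Guarded &other);  // non-trivial copy ctor -> passed indirectly per the ABI
    };
    void use(Guarded g);  // the callee receives a pointer, which occupies one integer register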
@@ -1780,7 +1829,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
++neededInt;
// Pick an 8-byte type based on the preferred type.
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
// If we have a sign or zero extended integer, make sure to return Extend
// so that the parameter gets the right LLVM IR attributes.
@@ -1800,19 +1849,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// available SSE register is used, the registers are taken in the
// order from %xmm0 to %xmm7.
case SSE: {
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
- if (Hi != NoClass || !UseX86_MMXType(IRType))
- ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
- else
- // This is an MMX type. Treat it as such.
- ResType = llvm::Type::getX86_MMXTy(getVMContext());
-
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
++neededSSE;
break;
}
}
- const llvm::Type *HighPart = 0;
+ llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously, ComplexX87 and X87 should
// never occur as hi classes, and X87Up must be preceded by X87,
@@ -1828,7 +1872,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
case Integer:
++neededInt;
// Pick an 8-byte type based on the preferred type.
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
@@ -1838,7 +1882,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// memory), except in situations involving unions.
case X87Up:
case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
@@ -1851,7 +1895,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// register. This only happens when 128-bit vectors are passed.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification");
- ResType = Get16ByteVectorType(Ty);
+ ResType = GetByteVectorType(Ty);
break;
}
@@ -2059,10 +2103,10 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// area, we need to collect the two eightbytes together.
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
- const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+ llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
const llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+ const llvm::StructType *ST = llvm::StructType::get(DoubleTy,
DoubleTy, NULL);
llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
@@ -2277,6 +2321,10 @@ public:
return 13;
}
+ llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
+ }
+
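For context: this is the no-op instruction ARC emits between a call and the follow-up call to objc_retainAutoreleasedReturnValue, so the Objective-C runtime can recognize the pattern and hand the result over without an actual autorelease. A hedged sketch of the emitted sequence (assembly shown as comments, illustrative):

    // bl   _makeObject                          @ callee returns an autoreleased object
    // mov  r7, r7                               @ the marker string above
    // bl   _objc_retainAutoreleasedReturnValue  @ runtime claims the value directly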
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
CodeGen::CGBuilderTy &Builder = CGF.Builder;
@@ -2290,8 +2338,6 @@ public:
return false;
}
-
-
};
}
@@ -2371,9 +2417,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
- const llvm::Type *STy =
- llvm::StructType::get(getVMContext(),
- llvm::ArrayType::get(ElemTy, SizeRegs), NULL, NULL);
+ llvm::Type *STy =
+ llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
return ABIArgInfo::getDirect(STy);
}
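
A hedged example of the coercion above (struct illustrative; assumes the 64-bit ElemTy path shown in the surrounding context):

    struct S { long long a; int b; };  // 8-byte aligned, padded to 128 bits
    // SizeRegs = (128 + 63) / 64 = 2, so the argument is coerced to { [2 x i64] }
    void g(struct S s);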
@@ -3010,10 +3055,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
- case llvm::Triple::x86:
+ case llvm::Triple::x86: {
+ bool DisableMMX = strcmp(getContext().Target.getABI(), "no-mmx") == 0;
+
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, true, true));
+ new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX));
switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
@@ -3024,12 +3071,13 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::OpenBSD:
case llvm::Triple::NetBSD:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, true));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, false));
+ new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX));
}
+ }
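
A hedged summary of the dispatch above (comment-only sketch; how the driver chooses the "no-mmx" ABI string is outside this diff):

    // DisableMMX == true  -> X86_32ABIInfo coerces 64-bit vector arguments to i64
    // DisableMMX == false -> they keep the x86_mmx lowering shown earlier
    // All three x86-32 variants (Darwin, the BSD/Cygwin/MinGW group, and the
    // default case) now thread the flag through to the ABI object.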
case llvm::Triple::x86_64:
switch (Triple.getOS()) {