author     dim <dim@FreeBSD.org>  2011-10-20 21:10:27 +0000
committer  dim <dim@FreeBSD.org>  2011-10-20 21:10:27 +0000
commit     7b3392326c40c3c20697816acae597ba7b3144eb
tree       2cbcf22585e99f8a87d12d5ff94f392c0d266819 /lib/Target/X86/X86FastISel.cpp
parent     1176aa52646fe641a4243a246aa7f960c708a274
Vendor import of llvm release_30 branch r142614:
http://llvm.org/svn/llvm-project/llvm/branches/release_30@142614
Diffstat (limited to 'lib/Target/X86/X86FastISel.cpp')
-rw-r--r--  lib/Target/X86/X86FastISel.cpp | 93
1 file changed, 61 insertions(+), 32 deletions(-)
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 21e163a..f912b28 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -22,6 +22,7 @@
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
+#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
@@ -59,8 +60,8 @@ public:
explicit X86FastISel(FunctionLoweringInfo &funcInfo) : FastISel(funcInfo) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
- X86ScalarSSEf64 = Subtarget->hasSSE2();
- X86ScalarSSEf32 = Subtarget->hasSSE1();
+ X86ScalarSSEf64 = Subtarget->hasSSE2() || Subtarget->hasAVX();
+ X86ScalarSSEf32 = Subtarget->hasSSE1() || Subtarget->hasAVX();
}
virtual bool TargetSelectInstruction(const Instruction *I);
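The constructor hunk widens the two scalar-FP feature flags: a subtarget with AVX can always execute the scalar SSE1/SSE2 instructions those flags guard, so either the specific SSE level or AVX now enables them. A minimal standalone sketch of that implication, with hypothetical names in place of the X86Subtarget API:

#include <cassert>

// Hypothetical CPU feature bits; only the implication mirrors the patch.
struct Features {
  bool sse1, sse2, avx;
};

struct ScalarFP {
  bool f32, f64;
  explicit ScalarFP(const Features &f)
      // AVX subsumes the scalar SSE1/SSE2 instruction set, so it
      // turns on both flags even when the SSE bits are clear.
      : f32(f.sse1 || f.avx), f64(f.sse2 || f.avx) {}
};

int main() {
  ScalarFP s(Features{false, false, true});  // AVX-only description
  assert(s.f32 && s.f64);
  return 0;
}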
@@ -134,7 +135,7 @@ private:
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
- bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
+ bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
bool IsMemcpySmall(uint64_t Len);
@@ -144,7 +145,7 @@ private:
} // end anonymous namespace.
-bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
+bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);
if (evt == MVT::Other || !evt.isSimple())
// Unhandled type. Halt "fast" selection and bail.
@@ -198,8 +199,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
RC = X86::GR64RegisterClass;
break;
case MVT::f32:
- if (Subtarget->hasSSE1()) {
- Opc = X86::MOVSSrm;
+ if (X86ScalarSSEf32) {
+ Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
RC = X86::FR32RegisterClass;
} else {
Opc = X86::LD_Fp32m;
@@ -207,8 +208,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
}
break;
case MVT::f64:
- if (Subtarget->hasSSE2()) {
- Opc = X86::MOVSDrm;
+ if (X86ScalarSSEf64) {
+ Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
RC = X86::FR64RegisterClass;
} else {
Opc = X86::LD_Fp64m;
@@ -250,10 +251,12 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
case MVT::i32: Opc = X86::MOV32mr; break;
case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
case MVT::f32:
- Opc = Subtarget->hasSSE1() ? X86::MOVSSmr : X86::ST_Fp32m;
+ Opc = X86ScalarSSEf32 ?
+ (Subtarget->hasAVX() ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;
break;
case MVT::f64:
- Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
+ Opc = X86ScalarSSEf64 ?
+ (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
break;
}
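The load and store hunks all apply the same two-level choice: first SSE unit versus legacy x87 stack, then, within SSE, the VEX-encoded AVX form (VMOVSS/VMOVSD) versus the legacy encoding. A compressed sketch of that decision table, with stand-in enumerators for the X86:: opcode values:

#include <cassert>

enum Opcode { ST_Fp32m, MOVSSmr, VMOVSSmr };  // stand-ins for X86::* values

// Mirrors the f32 store case: prefer scalar SSE (VEX-encoded when AVX
// is available), otherwise fall back to an x87 store.
Opcode chooseF32Store(bool scalarSSEf32, bool hasAVX) {
  if (!scalarSSEf32)
    return ST_Fp32m;
  return hasAVX ? VMOVSSmr : MOVSSmr;
}

int main() {
  assert(chooseF32Store(false, false) == ST_Fp32m);
  assert(chooseF32Store(true, false) == MOVSSmr);
  assert(chooseF32Store(true, true) == VMOVSSmr);
  return 0;
}

The f64 cases and the load hunks differ only in the opcode names.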
@@ -336,7 +339,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
U = C;
}
- if (const PointerType *Ty = dyn_cast<PointerType>(V->getType()))
+ if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
if (Ty->getAddressSpace() > 255)
// Fast instruction selection doesn't support the special
// address spaces.
@@ -399,7 +402,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
const Value *Op = *i;
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = TD.getStructLayout(STy);
Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
continue;
@@ -465,14 +468,23 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// Handle constant address.
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- // Can't handle alternate code models or TLS yet.
+ // Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return false;
+ // Can't handle TLS yet.
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (GVar->isThreadLocal())
return false;
+ // Can't handle TLS yet, part 2 (this is slightly crazy, but this is how
+ // it works...).
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ if (const GlobalVariable *GVar =
+ dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false)))
+ if (GVar->isThreadLocal())
+ return false;
+
// RIP-relative addresses can't have additional register operands, so if
// we've already folded stuff into the addressing mode, just force the
// global value into its own register, which we can use as the basereg.
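The new GlobalAlias block exists because the GVar check just above it only catches a direct reference to a thread-local variable: the address may instead come in through an alias whose aliasee is thread-local, and fast-isel has to refuse that case too. A toy model of the "look through the alias" shape, with standalone stand-ins (one level of aliasing only) rather than the LLVM class hierarchy:

#include <cassert>

// Toy stand-ins for GlobalVariable / GlobalAlias.
struct Global {
  bool threadLocal;
  const Global *aliasee;  // non-null when this Global is an alias
};

// Mirrors the patch: reject a TLS global whether it is referenced
// directly or through an alias (resolveAliasedGlobal in the real code
// additionally follows chains of aliases).
bool addressIsFoldable(const Global &gv) {
  const Global *resolved = gv.aliasee ? gv.aliasee : &gv;
  return !resolved->threadLocal;
}

int main() {
  Global tls{true, nullptr};
  Global alias{false, &tls};
  assert(!addressIsFoldable(tls) && !addressIsFoldable(alias));
  return 0;
}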
@@ -658,6 +670,10 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(const Instruction *I) {
+ // Atomic stores need special handling.
+ if (cast<StoreInst>(I)->isAtomic())
+ return false;
+
MVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true))
return false;
@@ -780,6 +796,10 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(const Instruction *I) {
+ // Atomic loads need special handling.
+ if (cast<LoadInst>(I)->isAtomic())
+ return false;
+
MVT VT;
if (!isTypeLegal(I->getType(), VT, /*AllowI1=*/true))
return false;
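X86SelectStore and X86SelectLoad now open with the same guard. Returning false from a fast-isel selector is the standard way to punt: the instruction then goes down the slower SelectionDAG path, which knows how to honor atomic ordering. The pattern in miniature, with a hypothetical selector:

#include <cstdio>

struct Inst { bool atomic; };

// Hypothetical fast path: handles only the plain case. Returning
// false means "not selected here"; the caller retries with the
// general (SelectionDAG-style) lowering.
bool fastSelectLoad(const Inst &i) {
  if (i.atomic)
    return false;  // atomics need ordering guarantees: punt
  std::puts("fast path emitted a plain load");
  return true;
}

int main() {
  if (!fastSelectLoad(Inst{true}))
    std::puts("fell back to the general selector");
  fastSelectLoad(Inst{false});
  return 0;
}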
@@ -797,14 +817,20 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
}
static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
+ bool HasAVX = Subtarget->hasAVX();
+ bool X86ScalarSSEf32 = HasAVX || Subtarget->hasSSE1();
+ bool X86ScalarSSEf64 = HasAVX || Subtarget->hasSSE2();
+
switch (VT.getSimpleVT().SimpleTy) {
default: return 0;
case MVT::i8: return X86::CMP8rr;
case MVT::i16: return X86::CMP16rr;
case MVT::i32: return X86::CMP32rr;
case MVT::i64: return X86::CMP64rr;
- case MVT::f32: return Subtarget->hasSSE1() ? X86::UCOMISSrr : 0;
- case MVT::f64: return Subtarget->hasSSE2() ? X86::UCOMISDrr : 0;
+ case MVT::f32:
+ return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;
+ case MVT::f64:
+ return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;
}
}
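X86ChooseCmpOpcode is a static helper, so it cannot read the X86FastISel member flags of the same names and must rederive them from the subtarget; the expressions have to stay in lockstep with the constructor, or compares would disagree with loads about which types are legal. Returning 0 remains the "no usable opcode" sentinel that makes the caller bail out. One way to keep the derivation in a single place (a hypothetical refactoring, not what the patch does):

#include <cassert>

struct SubtargetBits { bool sse1, sse2, avx; };

// A shared derivation keeps the constructor and free helpers from
// drifting apart.
inline bool scalarSSEf32(const SubtargetBits &s) { return s.avx || s.sse1; }
inline bool scalarSSEf64(const SubtargetBits &s) { return s.avx || s.sse2; }

int main() {
  SubtargetBits avxOnly{false, false, true};
  assert(scalarSSEf32(avxOnly) && scalarSSEf64(avxOnly));
  return 0;
}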
@@ -1207,7 +1233,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
bool X86FastISel::X86SelectFPExt(const Instruction *I) {
// fpext from float to double.
- if (Subtarget->hasSSE2() &&
+ if (X86ScalarSSEf64 &&
I->getType()->isDoubleTy()) {
const Value *V = I->getOperand(0);
if (V->getType()->isFloatTy()) {
@@ -1226,7 +1252,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
}
bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
- if (Subtarget->hasSSE2()) {
+ if (X86ScalarSSEf64) {
if (I->getType()->isFloatTy()) {
const Value *V = I->getOperand(0);
if (V->getType()->isDoubleTy()) {
@@ -1365,6 +1391,9 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
case Intrinsic::memset: {
const MemSetInst &MSI = cast<MemSetInst>(I);
+ if (MSI.isVolatile())
+ return false;
+
unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
if (!MSI.getLength()->getType()->isIntegerTy(SizeWidth))
return false;
@@ -1411,7 +1440,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
// Replace "add with overflow" intrinsics with an "add" instruction followed
// by a seto/setc instruction.
const Function *Callee = I.getCalledFunction();
- const Type *RetTy =
+ Type *RetTy =
cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
MVT VT;
@@ -1484,8 +1513,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
return false;
- const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PT->getElementType());
bool isVarArg = FTy->isVarArg();
// Don't know how to handle Win64 varargs yet. Nothing special needed for
@@ -1547,8 +1576,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
Flags.setZExt();
if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) {
- const PointerType *Ty = cast<PointerType>(ArgVal->getType());
- const Type *ElementTy = Ty->getElementType();
+ PointerType *Ty = cast<PointerType>(ArgVal->getType());
+ Type *ElementTy = Ty->getElementType();
unsigned FrameSize = TD.getTypeAllocSize(ElementTy);
unsigned FrameAlign = CS.getParamAlignment(AttrInd);
if (!FrameAlign)
@@ -1600,7 +1629,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (ArgReg == 0) return false;
- const Type *ArgTy = ArgVal->getType();
+ Type *ArgTy = ArgVal->getType();
MVT ArgVT;
if (!isTypeLegal(ArgTy, ArgVT))
return false;
@@ -1709,7 +1738,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
assert(Res && "memcpy length already checked!"); (void)Res;
} else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
// If this is a really simple value, emit this with the Value* version
- //of X86FastEmitStore. If it isn't simple, we don't want to do this,
+ // of X86FastEmitStore. If it isn't simple, we don't want to do this,
// as it can cause us to reevaluate the argument.
X86FastEmitStore(ArgVT, ArgVal, AM);
} else {
@@ -1965,8 +1994,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
RC = X86::GR64RegisterClass;
break;
case MVT::f32:
- if (Subtarget->hasSSE1()) {
- Opc = X86::MOVSSrm;
+ if (X86ScalarSSEf32) {
+ Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
RC = X86::FR32RegisterClass;
} else {
Opc = X86::LD_Fp32m;
@@ -1974,8 +2003,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
}
break;
case MVT::f64:
- if (Subtarget->hasSSE2()) {
- Opc = X86::MOVSDrm;
+ if (X86ScalarSSEf64) {
+ Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
RC = X86::FR64RegisterClass;
} else {
Opc = X86::LD_Fp64m;
@@ -2070,8 +2099,8 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
switch (VT.SimpleTy) {
default: return false;
case MVT::f32:
- if (Subtarget->hasSSE1()) {
- Opc = X86::FsFLD0SS;
+ if (X86ScalarSSEf32) {
+ Opc = Subtarget->hasAVX() ? X86::VFsFLD0SS : X86::FsFLD0SS;
RC = X86::FR32RegisterClass;
} else {
Opc = X86::LD_Fp032;
@@ -2079,8 +2108,8 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
}
break;
case MVT::f64:
- if (Subtarget->hasSSE2()) {
- Opc = X86::FsFLD0SD;
+ if (X86ScalarSSEf64) {
+ Opc = Subtarget->hasAVX() ? X86::VFsFLD0SD : X86::FsFLD0SD;
RC = X86::FR64RegisterClass;
} else {
Opc = X86::LD_Fp064;
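The final two hunks extend TargetMaterializeFloatZero the same way: FsFLD0SS/FsFLD0SD are pseudo-instructions that produce +0.0 by zeroing an XMM register rather than loading from the constant pool, and the V-prefixed forms keep the selected code uniformly VEX-encoded when AVX is enabled. The same idea expressed with portable intrinsics rather than LLVM opcodes (illustrative only, x86 host assumed):

#include <immintrin.h>
#include <cstdio>

int main() {
  // +0.0f without a memory load: compilers lower _mm_setzero_ps to a
  // register-zeroing xorps (vxorps when compiled with -mavx).
  __m128 zero = _mm_setzero_ps();
  std::printf("%f\n", _mm_cvtss_f32(zero));  // prints 0.000000
  return 0;
}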