Diffstat (limited to 'contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp')
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp | 137
1 file changed, 89 insertions, 48 deletions
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index 13bd0c7..a561d5b 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -17,6 +17,7 @@
#include "MCTargetDesc/PPCPredicates.h"
#include "PPCCallingConv.h"
#include "PPCISelLowering.h"
+#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/Optional.h"
@@ -85,18 +86,20 @@ typedef struct Address {
class PPCFastISel final : public FastISel {
const TargetMachine &TM;
+ const PPCSubtarget *PPCSubTarget;
+ PPCFunctionInfo *PPCFuncInfo;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
- const PPCSubtarget *PPCSubTarget;
LLVMContext *Context;
public:
explicit PPCFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo)
: FastISel(FuncInfo, LibInfo), TM(FuncInfo.MF->getTarget()),
- TII(*TM.getSubtargetImpl()->getInstrInfo()),
- TLI(*TM.getSubtargetImpl()->getTargetLowering()),
- PPCSubTarget(&TM.getSubtarget<PPCSubtarget>()),
+ PPCSubTarget(&FuncInfo.MF->getSubtarget<PPCSubtarget>()),
+ PPCFuncInfo(FuncInfo.MF->getInfo<PPCFunctionInfo>()),
+ TII(*PPCSubTarget->getInstrInfo()),
+ TLI(*PPCSubTarget->getTargetLowering()),
Context(&FuncInfo.Fn->getContext()) {}
// Backend specific FastISel code.
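The field reordering above matters because C++ initializes non-static data members in declaration order, not in the order they appear in the constructor's initializer list; TII and TLI are now initialized from PPCSubTarget, so PPCSubTarget must be declared first. A minimal standalone illustration of that rule (plain C++, not part of the patch):

#include <cassert>

struct Widget {
  int A;                       // declared first, so initialized first
  int B;                       // initializer below reads A; safe only because A precedes it
  Widget() : A(1), B(A + 1) {} // reordering this list would not change the init order
};

int main() {
  Widget W;
  assert(W.A == 1 && W.B == 2);
  return 0;
}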
@@ -141,9 +144,13 @@ class PPCFastISel final : public FastISel {
private:
bool isTypeLegal(Type *Ty, MVT &VT);
bool isLoadTypeLegal(Type *Ty, MVT &VT);
+ bool isValueAvailable(const Value *V) const;
bool isVSFRCRegister(unsigned Register) const {
return MRI.getRegClass(Register)->getID() == PPC::VSFRCRegClassID;
}
+ bool isVSSRCRegister(unsigned Register) const {
+ return MRI.getRegClass(Register)->getID() == PPC::VSSRCRegClassID;
+ }
bool PPCEmitCmp(const Value *Src1Value, const Value *Src2Value,
bool isZExt, unsigned DestReg);
bool PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
@@ -280,6 +287,17 @@ bool PPCFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
return false;
}
+bool PPCFastISel::isValueAvailable(const Value *V) const {
+ if (!isa<Instruction>(V))
+ return true;
+
+ const auto *I = cast<Instruction>(V);
+ if (FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB)
+ return true;
+
+ return false;
+}
+
// Given a value Obj, create an Address object Addr that represents its
// address. Return false if we can't handle it.
bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
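As a rough standalone model of the helper added above (illustrative C++ only, not LLVM code; the types and block names are made up): a value is always available if it is not an instruction, and otherwise only when its defining instruction lives in the block currently being selected.

#include <cassert>
#include <string>

struct Value { virtual ~Value() = default; };
struct Instruction : Value { std::string ParentBlock; };

// Non-instructions (constants, arguments) are always usable; instructions are
// usable only when defined in the current basic block.
static bool isValueAvailable(const Value *V, const std::string &CurBlock) {
  const auto *I = dynamic_cast<const Instruction *>(V);
  if (!I)
    return true;
  return I->ParentBlock == CurBlock;
}

int main() {
  Instruction Cmp;
  Cmp.ParentBlock = "entry";
  assert(isValueAvailable(&Cmp, "entry"));
  assert(!isValueAvailable(&Cmp, "if.then"));
  return 0;
}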
@@ -488,8 +506,11 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// If this is a potential VSX load with an offset of 0, a VSX indexed load can
// be used.
+ bool IsVSSRC = (ResultReg != 0) && isVSSRCRegister(ResultReg);
bool IsVSFRC = (ResultReg != 0) && isVSFRCRegister(ResultReg);
- if (IsVSFRC && (Opc == PPC::LFD) &&
+ bool Is32VSXLoad = IsVSSRC && Opc == PPC::LFS;
+ bool Is64VSXLoad = IsVSSRC && Opc == PPC::LFD;
+ if ((Is32VSXLoad || Is64VSXLoad) &&
(Addr.BaseType != Address::FrameIndexBase) && UseOffset &&
(Addr.Offset == 0)) {
UseOffset = false;
@@ -503,7 +524,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// into a RegBase.
if (Addr.BaseType == Address::FrameIndexBase) {
// VSX only provides an indexed load.
- if (IsVSFRC && Opc == PPC::LFD) return false;
+ if (Is32VSXLoad || Is64VSXLoad) return false;
MachineMemOperand *MMO =
FuncInfo.MF->getMachineMemOperand(
@@ -517,7 +538,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// Base reg with offset in range.
} else if (UseOffset) {
// VSX only provides an indexed load.
- if (IsVSFRC && Opc == PPC::LFD) return false;
+ if (Is32VSXLoad || Is64VSXLoad) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(Addr.Offset).addReg(Addr.Base.Reg);
@@ -540,7 +561,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
case PPC::LWA: Opc = PPC::LWAX; break;
case PPC::LWA_32: Opc = PPC::LWAX_32; break;
case PPC::LD: Opc = PPC::LDX; break;
- case PPC::LFS: Opc = PPC::LFSX; break;
+ case PPC::LFS: Opc = IsVSSRC ? PPC::LXSSPX : PPC::LFSX; break;
case PPC::LFD: Opc = IsVSFRC ? PPC::LXSDX : PPC::LFDX; break;
}
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
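A compact standalone sketch of the opcode choice this hunk extends (the string names only mirror the PPC opcode enums; illustrative C++, not LLVM code): destinations in a VSX register class get the VSX scalar indexed loads, otherwise the classic FP loads are kept.

#include <cassert>
#include <string>

static std::string indexedFPLoad(bool IsVSSRC, bool IsVSFRC, bool Is32Bit) {
  if (Is32Bit)
    return IsVSSRC ? "LXSSPX" : "LFSX";
  return IsVSFRC ? "LXSDX" : "LFDX";
}

int main() {
  assert(indexedFPLoad(true, false, /*Is32Bit=*/true) == "LXSSPX");
  assert(indexedFPLoad(false, true, /*Is32Bit=*/false) == "LXSDX");
  return 0;
}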
@@ -621,9 +642,12 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
// If this is a potential VSX store with an offset of 0, a VSX indexed store
// can be used.
+ bool IsVSSRC = isVSSRCRegister(SrcReg);
bool IsVSFRC = isVSFRCRegister(SrcReg);
- if (IsVSFRC && (Opc == PPC::STFD) &&
- (Addr.BaseType != Address::FrameIndexBase) && UseOffset &&
+ bool Is32VSXStore = IsVSSRC && Opc == PPC::STFS;
+ bool Is64VSXStore = IsVSFRC && Opc == PPC::STFD;
+ if ((Is32VSXStore || Is64VSXStore) &&
+ (Addr.BaseType != Address::FrameIndexBase) && UseOffset &&
(Addr.Offset == 0)) {
UseOffset = false;
}
@@ -633,7 +657,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
// into a RegBase.
if (Addr.BaseType == Address::FrameIndexBase) {
// VSX only provides an indexed store.
- if (IsVSFRC && Opc == PPC::STFD) return false;
+ if (Is32VSXStore || Is64VSXStore) return false;
MachineMemOperand *MMO =
FuncInfo.MF->getMachineMemOperand(
@@ -650,7 +674,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
// Base reg with offset in range.
} else if (UseOffset) {
// VSX only provides an indexed store.
- if (IsVSFRC && Opc == PPC::STFD) return false;
+ if (Is32VSXStore || Is64VSXStore) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
.addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg);
@@ -669,11 +693,21 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
case PPC::STH8: Opc = PPC::STHX8; break;
case PPC::STW8: Opc = PPC::STWX8; break;
case PPC::STD: Opc = PPC::STDX; break;
- case PPC::STFS: Opc = PPC::STFSX; break;
+ case PPC::STFS: Opc = IsVSSRC ? PPC::STXSSPX : PPC::STFSX; break;
case PPC::STFD: Opc = IsVSFRC ? PPC::STXSDX : PPC::STFDX; break;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
- .addReg(SrcReg).addReg(Addr.Base.Reg).addReg(IndexReg);
+
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+ .addReg(SrcReg);
+
+ // If we have an index register defined we use it in the store instruction,
+ // otherwise we use X0 as the base register: for these instructions a base
+ // of X0 means zero is used in the effective-address computation, regardless
+ // of the register's contents.
+ if (IndexReg)
+ MIB.addReg(Addr.Base.Reg).addReg(IndexReg);
+ else
+ MIB.addReg(PPC::ZERO8).addReg(Addr.Base.Reg);
}
return true;
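The X0/ZERO8 trick rests on the PowerPC X-form addressing rule: the effective address is (RA|0) + RB, so an RA field of 0 contributes the literal value zero rather than the contents of r0. A standalone model of that computation (illustrative C++, the register values are made up):

#include <cassert>
#include <cstdint>

// X-form effective address: EA = (RA|0) + RB, i.e. RA == 0 means "use zero",
// never the contents of GPR 0.
static uint64_t effectiveAddress(unsigned RA, unsigned RB, const uint64_t GPR[32]) {
  uint64_t Base = (RA == 0) ? 0 : GPR[RA];
  return Base + GPR[RB];
}

int main() {
  uint64_t GPR[32] = {0};
  GPR[3] = 0x1000;      // the address register produced by PPCComputeAddress
  GPR[0] = 0xdeadbeef;  // r0's contents must not leak into the address
  assert(effectiveAddress(/*RA=*/0, /*RB=*/3, GPR) == 0x1000);
  return 0;
}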
@@ -718,30 +752,31 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
// For now, just try the simplest case where it's fed by a compare.
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
- Optional<PPC::Predicate> OptPPCPred = getComparePred(CI->getPredicate());
- if (!OptPPCPred)
- return false;
+ if (isValueAvailable(CI)) {
+ Optional<PPC::Predicate> OptPPCPred = getComparePred(CI->getPredicate());
+ if (!OptPPCPred)
+ return false;
- PPC::Predicate PPCPred = OptPPCPred.getValue();
+ PPC::Predicate PPCPred = OptPPCPred.getValue();
- // Take advantage of fall-through opportunities.
- if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
- std::swap(TBB, FBB);
- PPCPred = PPC::InvertPredicate(PPCPred);
- }
-
- unsigned CondReg = createResultReg(&PPC::CRRCRegClass);
+ // Take advantage of fall-through opportunities.
+ if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
+ std::swap(TBB, FBB);
+ PPCPred = PPC::InvertPredicate(PPCPred);
+ }
- if (!PPCEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
- CondReg))
- return false;
+ unsigned CondReg = createResultReg(&PPC::CRRCRegClass);
- BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCC))
- .addImm(PPCPred).addReg(CondReg).addMBB(TBB);
- fastEmitBranch(FBB, DbgLoc);
- FuncInfo.MBB->addSuccessor(TBB);
- return true;
+ if (!PPCEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
+ CondReg))
+ return false;
+ BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCC))
+ .addImm(PPCPred).addReg(CondReg).addMBB(TBB);
+ fastEmitBranch(FBB, DbgLoc);
+ FuncInfo.MBB->addSuccessor(TBB);
+ return true;
+ }
} else if (const ConstantInt *CI =
dyn_cast<ConstantInt>(BI->getCondition())) {
uint64_t Imm = CI->getZExtValue();
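The re-indented block above also keeps the fall-through optimisation: when the "true" destination is the next block in layout order, the targets are swapped and the predicate inverted so only the cold edge needs an explicit conditional branch. A standalone model of that transformation (illustrative C++; only ordered integer predicates are shown):

#include <cassert>
#include <utility>

enum Pred { EQ, NE, LT, GE, GT, LE };

static Pred invert(Pred P) {
  switch (P) {
  case EQ: return NE;
  case NE: return EQ;
  case LT: return GE;
  case GE: return LT;
  case GT: return LE;
  case LE: return GT;
  }
  return P;
}

// If the true target already falls through, branch on the inverted condition
// to the false target instead.
static void layoutBranch(int &TrueBB, int &FalseBB, Pred &P, int NextInLayout) {
  if (TrueBB == NextInLayout) {
    std::swap(TrueBB, FalseBB);
    P = invert(P);
  }
}

int main() {
  int TBB = 2, FBB = 3;
  Pred P = LT;
  layoutBranch(TBB, FBB, P, /*NextInLayout=*/2);
  assert(TBB == 3 && FBB == 2 && P == GE);
  return 0;
}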
@@ -945,6 +980,8 @@ unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg,
}
// Attempt to fast-select an integer-to-floating-point conversion.
+// FIXME: Once fast-isel has better support for VSX, conversions using
+// direct moves should be implemented.
bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
MVT DstVT;
Type *DstTy = I->getType();
@@ -1052,6 +1089,8 @@ unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
}
// Attempt to fast-select a floating-point-to-integer conversion.
+// FIXME: Once fast-isel has better support for VSX, conversions using
+// direct moves should be implemented.
bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
MVT DstVT, SrcVT;
Type *DstTy = I->getType();
@@ -1234,9 +1273,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, *Context);
// Reserve space for the linkage area on the stack.
- bool isELFv2ABI = PPCSubTarget->isELFv2ABI();
- unsigned LinkageSize = PPCFrameLowering::getLinkageSize(true, false,
- isELFv2ABI);
+ unsigned LinkageSize = PPCSubTarget->getFrameLowering()->getLinkageSize();
CCInfo.AllocateStack(LinkageSize, 8);
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
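getLinkageSize() now comes from the per-function subtarget's frame lowering; the sizes themselves are fixed by the 64-bit ELF ABIs (six doublewords, 48 bytes, for ELFv1 and four doublewords, 32 bytes, for ELFv2). A standalone sketch of the reservation (illustrative C++, not the LLVM interface):

#include <cassert>

// Caller-reserved linkage area at the bottom of each stack frame.
static unsigned linkageSize(bool IsELFv2) {
  return IsELFv2 ? 32 : 48;   // 4 vs. 6 doublewords
}

int main() {
  unsigned StackOffset = 0;
  StackOffset += linkageSize(/*IsELFv2=*/true);  // what AllocateStack reserves first
  assert(StackOffset == 32);
  assert(linkageSize(/*IsELFv2=*/false) == 48);
  return 0;
}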
@@ -1275,7 +1312,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
// Prepare to assign register arguments. Every argument uses up a
// GPR protocol register even if it's passed in a floating-point
- // register.
+ // register (unless we're using the fast calling convention).
unsigned NextGPR = PPC::X3;
unsigned NextFPR = PPC::F1;
@@ -1325,7 +1362,8 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
unsigned ArgReg;
if (ArgVT == MVT::f32 || ArgVT == MVT::f64) {
ArgReg = NextFPR++;
- ++NextGPR;
+ if (CC != CallingConv::Fast)
+ ++NextGPR;
} else
ArgReg = NextGPR++;
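A standalone model of the accounting change above (illustrative C++ only, not LLVM code): under the standard 64-bit ELF convention every floating-point argument also burns one of the GPR "protocol" slots, while CallingConv::Fast lets integer and floating-point arguments consume their register files independently.

#include <cassert>
#include <vector>

struct RegUse { unsigned GPRs = 0, FPRs = 0; };

// ArgIsFP[i] is true when argument i is f32/f64.
static RegUse countArgRegs(const std::vector<bool> &ArgIsFP, bool FastCC) {
  RegUse U;
  for (bool IsFP : ArgIsFP) {
    if (IsFP) {
      ++U.FPRs;
      if (!FastCC)
        ++U.GPRs;  // the shadow GPR slot exists only for the standard convention
    } else {
      ++U.GPRs;
    }
  }
  return U;
}

int main() {
  std::vector<bool> Args = {false, true, true};  // one integer, two floats
  assert(countArgRegs(Args, /*FastCC=*/false).GPRs == 3);
  assert(countArgRegs(Args, /*FastCC=*/true).GPRs == 1);
  assert(countArgRegs(Args, /*FastCC=*/true).FPRs == 2);
  return 0;
}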
@@ -1432,6 +1470,9 @@ bool PPCFastISel::fastLowerCall(CallLoweringInfo &CLI) {
else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
RetVT != MVT::i8)
return false;
+ else if (RetVT == MVT::i1 && PPCSubTarget->useCRBits())
+ // We can't handle boolean returns when CR bits are in use.
+ return false;
// FIXME: No multi-register return values yet.
if (RetVT != MVT::isVoid && RetVT != MVT::i8 && RetVT != MVT::i16 &&
@@ -1523,13 +1564,14 @@ bool PPCFastISel::fastLowerCall(CallLoweringInfo &CLI) {
for (unsigned II = 0, IE = RegArgs.size(); II != IE; ++II)
MIB.addReg(RegArgs[II], RegState::Implicit);
- // Direct calls in the ELFv2 ABI need the TOC register live into the call.
- if (PPCSubTarget->isELFv2ABI())
- MIB.addReg(PPC::X2, RegState::Implicit);
+ // Direct calls, in both the ELF V1 and V2 ABIs, need the TOC register live
+ // into the call.
+ PPCFuncInfo->setUsesTOCBasePtr();
+ MIB.addReg(PPC::X2, RegState::Implicit);
// Add a register mask with the call-preserved registers. Proper
// defs for return values will be added by setPhysRegsDeadExcept().
- MIB.addRegMask(TRI.getCallPreservedMask(CC));
+ MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
CLI.Call = MIB;
@@ -1863,6 +1905,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
unsigned Opc = (VT == MVT::f32) ? PPC::LFS : PPC::LFD;
unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ PPCFuncInfo->setUsesTOCBasePtr();
// For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocCPT),
@@ -1912,6 +1955,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
if (GV->isThreadLocal())
return 0;
+ PPCFuncInfo->setUsesTOCBasePtr();
// For small code model, generate a simple TOC load.
if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtoc),
@@ -2297,13 +2341,10 @@ namespace llvm {
// Create the fast instruction selector for PowerPC64 ELF.
FastISel *PPC::createFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo) {
- const TargetMachine &TM = FuncInfo.MF->getTarget();
-
// Only available on 64-bit ELF for now.
- const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();
- if (Subtarget->isPPC64() && Subtarget->isSVR4ABI())
+ const PPCSubtarget &Subtarget = FuncInfo.MF->getSubtarget<PPCSubtarget>();
+ if (Subtarget.isPPC64() && Subtarget.isSVR4ABI())
return new PPCFastISel(FuncInfo, LibInfo);
-
return nullptr;
}
}