author:    dim <dim@FreeBSD.org>  2012-08-15 19:34:23 +0000
committer: dim <dim@FreeBSD.org>  2012-08-15 19:34:23 +0000
commit:    721c201bd55ffb73cb2ba8d39e0570fa38c44e15 (patch)
tree:      eacfc83d988e4b9d11114387ae7dc41243f2a363 /lib/Target/CellSPU
parent:    2b2816e083a455f7a656ae88b0fd059d1688bb36 (diff)
Vendor import of llvm trunk r161861:
http://llvm.org/svn/llvm-project/llvm/trunk@161861
Diffstat (limited to 'lib/Target/CellSPU')
-rw-r--r--  lib/Target/CellSPU/CMakeLists.txt            |  2
-rw-r--r--  lib/Target/CellSPU/README.txt                | 14
-rw-r--r--  lib/Target/CellSPU/SPUAsmPrinter.cpp         |  4
-rw-r--r--  lib/Target/CellSPU/SPUHazardRecognizers.cpp  |  6
-rw-r--r--  lib/Target/CellSPU/SPUHazardRecognizers.h    |  6
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp       | 73
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.h         |  9
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.cpp          | 94
-rw-r--r--  lib/Target/CellSPU/SPUInstrInfo.td           |  4
-rw-r--r--  lib/Target/CellSPU/SPURegisterInfo.cpp       |  3
-rw-r--r--  lib/Target/CellSPU/SPURegisterInfo.h         |  7
-rw-r--r--  lib/Target/CellSPU/SPUTargetMachine.cpp      |  6
12 files changed, 119 insertions, 109 deletions
diff --git a/lib/Target/CellSPU/CMakeLists.txt b/lib/Target/CellSPU/CMakeLists.txt
index cf4f796..1f8ca86 100644
--- a/lib/Target/CellSPU/CMakeLists.txt
+++ b/lib/Target/CellSPU/CMakeLists.txt
@@ -24,5 +24,7 @@ add_llvm_target(CellSPUCodeGen
SPUNopFiller.cpp
)
+add_dependencies(LLVMCellSPUCodeGen intrinsics_gen)
+
add_subdirectory(TargetInfo)
add_subdirectory(MCTargetDesc)
diff --git a/lib/Target/CellSPU/README.txt b/lib/Target/CellSPU/README.txt
index 3e7e0b6..3bce960 100644
--- a/lib/Target/CellSPU/README.txt
+++ b/lib/Target/CellSPU/README.txt
@@ -37,6 +37,20 @@ to add 'spu' to configure's --enable-targets option, e.g.:
---------------------------------------------------------------------------
TODO:
+* In commit r142152 vector legalization was set to element promotion by
+  default. This breaks half vectors (e.g. v2i32) badly, as they get element-
+  promoted to much slower types (v2i64).
+
+* Many CellSPU-specific codegen tests only grep & count the number of
+  instructions, rather than checking their placement with FileCheck. There
+  have also been some commits that change the CellSPU checks, some of which
+  might not have been thoroughly scrutinized w.r.t. the changes they cause
+  in SPU assembly (especially since about the time of r142152).
+
+* Some of the i64 math has huge tablegen rules, which sometimes cause
+  tablegen to run out of memory. See e.g. bug 8850. i64 arithmetic
+  should probably be done with libraries.
+
* Create a machine pass for performing dual-pipeline scheduling specifically
for CellSPU, and insert branch prediction instructions as needed.
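[Editor's note on the last TODO item above: a minimal sketch of how a SelectionDAG target can route i64 arithmetic to library calls instead of huge tablegen patterns, using the standard setOperationAction/setLibcallName hooks already used elsewhere in this patch. The libcall names below are the conventional libgcc/compiler-rt symbols and are an assumption here, not something CellSPU is known to provide.]

// Sketch only (not part of this patch); this fragment would live inside a
// TargetLowering-derived constructor, next to the existing setLibcallName
// calls. Expanding the operations lets the legalizer emit libcalls rather
// than relying on enormous tablegen selection rules.
setOperationAction(ISD::MUL,  MVT::i64, Expand);   // -> __muldi3
setOperationAction(ISD::SDIV, MVT::i64, Expand);   // -> __divdi3
setOperationAction(ISD::UDIV, MVT::i64, Expand);   // -> __udivdi3
setOperationAction(ISD::SREM, MVT::i64, Expand);   // -> __moddi3
setOperationAction(ISD::UREM, MVT::i64, Expand);   // -> __umoddi3
// Only needed if the runtime uses non-default symbol names:
setLibcallName(RTLIB::MUL_I64,  "__muldi3");
setLibcallName(RTLIB::SDIV_I64, "__divdi3");
setLibcallName(RTLIB::UDIV_I64, "__udivdi3");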
diff --git a/lib/Target/CellSPU/SPUAsmPrinter.cpp b/lib/Target/CellSPU/SPUAsmPrinter.cpp
index 14021fe..03d5a9a 100644
--- a/lib/Target/CellSPU/SPUAsmPrinter.cpp
+++ b/lib/Target/CellSPU/SPUAsmPrinter.cpp
@@ -301,7 +301,9 @@ bool SPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
if (ExtraCode[1] != 0) return true; // Unknown modifier.
switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
case 'L': // Write second word of DImode reference.
// Verify that this operand has two consecutive registers.
if (!MI->getOperand(OpNo).isReg() ||
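[Editor's note: the hunk above changes unknown inline-asm operand modifiers from a hard error into a fall-through to the generic AsmPrinter handling. A hedged sketch of that pattern for a hypothetical target printer; the class name MyAsmPrinter and its printOperand helper are illustrative, only AsmPrinter::PrintAsmOperand is taken from the hunk.]

// Sketch: handle target-specific modifiers, defer everything else to the
// generic printer (which understands modifiers such as 'c' and 'n').
bool MyAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                   unsigned AsmVariant, const char *ExtraCode,
                                   raw_ostream &O) {
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0) return true;      // unknown multi-char modifier
    switch (ExtraCode[0]) {
    default:
      // Not a target modifier: let the generic implementation try it.
      return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
    case 'L':                                // target-specific modifier
      break;                                 // fall through to printOperand
    }
  }
  printOperand(MI, OpNo, O);                 // hypothetical default printing
  return false;                              // false == success
}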
diff --git a/lib/Target/CellSPU/SPUHazardRecognizers.cpp b/lib/Target/CellSPU/SPUHazardRecognizers.cpp
index 403d7ef..67a83f1 100644
--- a/lib/Target/CellSPU/SPUHazardRecognizers.cpp
+++ b/lib/Target/CellSPU/SPUHazardRecognizers.cpp
@@ -30,12 +30,6 @@ using namespace llvm;
// very little right now.
//===----------------------------------------------------------------------===//
-SPUHazardRecognizer::SPUHazardRecognizer(const TargetInstrInfo &tii) :
- TII(tii),
- EvenOdd(0)
-{
-}
-
/// Return the pipeline hazard type encountered or generated by this
/// instruction. Currently returns NoHazard.
///
diff --git a/lib/Target/CellSPU/SPUHazardRecognizers.h b/lib/Target/CellSPU/SPUHazardRecognizers.h
index 675632c..30acaea 100644
--- a/lib/Target/CellSPU/SPUHazardRecognizers.h
+++ b/lib/Target/CellSPU/SPUHazardRecognizers.h
@@ -24,12 +24,8 @@ class TargetInstrInfo;
/// SPUHazardRecognizer
class SPUHazardRecognizer : public ScheduleHazardRecognizer
{
-private:
- const TargetInstrInfo &TII;
- int EvenOdd;
-
public:
- SPUHazardRecognizer(const TargetInstrInfo &TII);
+ SPUHazardRecognizer(const TargetInstrInfo &/*TII*/) {}
virtual HazardType getHazardType(SUnit *SU, int Stalls);
virtual void EmitInstruction(SUnit *SU);
virtual void AdvanceCycle();
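[Editor's note: the hunk above drops the unused TII/EvenOdd state, leaving the recognizer effectively stateless. For reference, a do-nothing ScheduleHazardRecognizer looks roughly like the sketch below; the class name is illustrative and not part of the patch.]

// Sketch of a no-op hazard recognizer; the scheduler calls these hooks
// around every instruction it issues.
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"

namespace llvm {
class NoopHazardRecognizer : public ScheduleHazardRecognizer {
public:
  // Never report a structural hazard.
  virtual HazardType getHazardType(SUnit * /*SU*/, int /*Stalls*/) {
    return NoHazard;
  }
  virtual void EmitInstruction(SUnit * /*SU*/) {}  // nothing to record
  virtual void AdvanceCycle() {}                   // no cycle bookkeeping
  virtual void Reset() {}                          // no state to clear
};
} // end namespace llvm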
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 0623741..4e9fcd1 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -77,12 +77,14 @@ namespace {
// Splice the libcall in wherever FindInputOutputChains tells us to.
Type *RetTy =
Op.getNode()->getValueType(0).getTypeForEVT(*DAG.getContext());
- std::pair<SDValue, SDValue> CallInfo =
- TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ TargetLowering::CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned,
+ false, false,
0, TLI.getLibcallCallingConv(LC),
/*isTailCall=*/false,
- /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
+ /*doesNotRet=*/false,
+ /*isReturnValueUsed=*/true,
Callee, Args, DAG, Op.getDebugLoc());
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
return CallInfo.first;
}
@@ -100,13 +102,13 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
// Set up the SPU's register classes:
- addRegisterClass(MVT::i8, SPU::R8CRegisterClass);
- addRegisterClass(MVT::i16, SPU::R16CRegisterClass);
- addRegisterClass(MVT::i32, SPU::R32CRegisterClass);
- addRegisterClass(MVT::i64, SPU::R64CRegisterClass);
- addRegisterClass(MVT::f32, SPU::R32FPRegisterClass);
- addRegisterClass(MVT::f64, SPU::R64FPRegisterClass);
- addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
+ addRegisterClass(MVT::i8, &SPU::R8CRegClass);
+ addRegisterClass(MVT::i16, &SPU::R16CRegClass);
+ addRegisterClass(MVT::i32, &SPU::R32CRegClass);
+ addRegisterClass(MVT::i64, &SPU::R64CRegClass);
+ addRegisterClass(MVT::f32, &SPU::R32FPRegClass);
+ addRegisterClass(MVT::f64, &SPU::R64FPRegClass);
+ addRegisterClass(MVT::i128, &SPU::GPRCRegClass);
// SPU has no sign or zero extended loads for i1, i8, i16:
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
@@ -397,12 +399,12 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
- addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
- addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
+ addRegisterClass(MVT::v16i8, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v8i16, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v4i32, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v2i64, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v4f32, &SPU::VECREGRegClass);
+ addRegisterClass(MVT::v2f64, &SPU::VECREGRegClass);
for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
@@ -1133,7 +1135,7 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// FIXME: allow for other calling conventions
CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
@@ -1263,14 +1265,19 @@ static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) {
}
SDValue
-SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+SPUTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
+ SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
// CellSPU target does not yet support tail call optimization.
isTailCall = false;
@@ -1280,7 +1287,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), ArgLocs, *DAG.getContext());
+ getTargetMachine(), ArgLocs, *DAG.getContext());
// FIXME: allow for other calling conventions
CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
@@ -1441,7 +1448,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Now handle the return value(s)
SmallVector<CCValAssign, 16> RVLocs;
CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCRetInfo.AnalyzeCallResult(Ins, CCC_SPU);
@@ -1468,7 +1475,7 @@ SPUTargetLowering::LowerReturn(SDValue Chain,
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
- getTargetMachine(), RVLocs, *DAG.getContext());
+ getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_SPU);
// If this is the first return lowered for this function, add the regs to the
@@ -3139,16 +3146,16 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
case 'b': // R1-R31
case 'r': // R0-R31
if (VT == MVT::i64)
- return std::make_pair(0U, SPU::R64CRegisterClass);
- return std::make_pair(0U, SPU::R32CRegisterClass);
+ return std::make_pair(0U, &SPU::R64CRegClass);
+ return std::make_pair(0U, &SPU::R32CRegClass);
case 'f':
if (VT == MVT::f32)
- return std::make_pair(0U, SPU::R32FPRegisterClass);
- else if (VT == MVT::f64)
- return std::make_pair(0U, SPU::R64FPRegisterClass);
+ return std::make_pair(0U, &SPU::R32FPRegClass);
+ if (VT == MVT::f64)
+ return std::make_pair(0U, &SPU::R64FPRegClass);
break;
case 'v':
- return std::make_pair(0U, SPU::GPRCRegisterClass);
+ return std::make_pair(0U, &SPU::GPRCRegClass);
}
}
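[Editor's note: the first hunk in this file tracks the LowerCallTo interface change, where the long argument list is now bundled into a TargetLowering::CallLoweringInfo before the call. A condensed sketch of the new calling pattern, using the same constructor arguments that appear in the hunk; the parameter-name comments reflect the editor's reading of the old LowerCallTo argument order and are not authoritative.]

// Sketch: lowering a libcall with the CallLoweringInfo-based interface.
// InChain, RetTy, isSigned, Callee, Args, LC and Op are assumed to be set up
// exactly as in the hunk above.
TargetLowering::CallLoweringInfo CLI(InChain, RetTy,
                                     /*RetSExt=*/isSigned,
                                     /*RetZExt=*/!isSigned,
                                     /*isVarArg=*/false, /*isInReg=*/false,
                                     /*NumFixedArgs=*/0,
                                     TLI.getLibcallCallingConv(LC),
                                     /*isTailCall=*/false,
                                     /*doesNotRet=*/false,
                                     /*isReturnValueUsed=*/true,
                                     Callee, Args, DAG, Op.getDebugLoc());
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
SDValue Result = CallInfo.first;   // returned value
SDValue Chain  = CallInfo.second;  // updated chain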
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h
index e3db7b2..9f1599f 100644
--- a/lib/Target/CellSPU/SPUISelLowering.h
+++ b/lib/Target/CellSPU/SPUISelLowering.h
@@ -86,7 +86,6 @@ namespace llvm {
class SPUTargetLowering :
public TargetLowering
{
- int VarArgsFrameIndex; // FrameIndex for start of varargs area.
SPUTargetMachine &SPUTM;
public:
@@ -159,13 +158,7 @@ namespace llvm {
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool doesNotRet, bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
diff --git a/lib/Target/CellSPU/SPUInstrInfo.cpp b/lib/Target/CellSPU/SPUInstrInfo.cpp
index 759923d..b25a639 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -140,29 +140,27 @@ SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const
-{
+ const TargetRegisterInfo *TRI) const {
unsigned opc;
bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
- if (RC == SPU::GPRCRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
- } else if (RC == SPU::R64CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
- } else if (RC == SPU::R64FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
- } else if (RC == SPU::R32CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
- } else if (RC == SPU::R32FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
- } else if (RC == SPU::R16CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
- } else if (RC == SPU::R8CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
- } else if (RC == SPU::VECREGRegisterClass) {
- opc = (isValidFrameIdx) ? SPU::STQDv16i8 : SPU::STQXv16i8;
- } else {
+ if (RC == &SPU::GPRCRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128;
+ else if (RC == &SPU::R64CRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64;
+ else if (RC == &SPU::R64FPRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64;
+ else if (RC == &SPU::R32CRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32;
+ else if (RC == &SPU::R32FPRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32;
+ else if (RC == &SPU::R16CRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16;
+ else if (RC == &SPU::R8CRegClass)
+ opc = isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8;
+ else if (RC == &SPU::VECREGRegClass)
+ opc = isValidFrameIdx ? SPU::STQDv16i8 : SPU::STQXv16i8;
+ else
llvm_unreachable("Unknown regclass!");
- }
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
@@ -175,29 +173,27 @@ SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const
-{
+ const TargetRegisterInfo *TRI) const {
unsigned opc;
bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
- if (RC == SPU::GPRCRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
- } else if (RC == SPU::R64CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
- } else if (RC == SPU::R64FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
- } else if (RC == SPU::R32CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
- } else if (RC == SPU::R32FPRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
- } else if (RC == SPU::R16CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
- } else if (RC == SPU::R8CRegisterClass) {
- opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
- } else if (RC == SPU::VECREGRegisterClass) {
- opc = (isValidFrameIdx) ? SPU::LQDv16i8 : SPU::LQXv16i8;
- } else {
+ if (RC == &SPU::GPRCRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128;
+ else if (RC == &SPU::R64CRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64;
+ else if (RC == &SPU::R64FPRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64;
+ else if (RC == &SPU::R32CRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32;
+ else if (RC == &SPU::R32FPRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32;
+ else if (RC == &SPU::R16CRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16;
+ else if (RC == &SPU::R8CRegClass)
+ opc = isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8;
+ else if (RC == &SPU::VECREGRegClass)
+ opc = isValidFrameIdx ? SPU::LQDv16i8 : SPU::LQXv16i8;
+ else
llvm_unreachable("Unknown regclass in loadRegFromStackSlot!");
- }
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
@@ -340,11 +336,11 @@ SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
static MachineBasicBlock::iterator findHBRPosition(MachineBasicBlock &MBB)
{
MachineBasicBlock::iterator J = MBB.end();
- for( int i=0; i<8; i++) {
- if( J == MBB.begin() ) return J;
- J--;
- }
- return J;
+ for( int i=0; i<8; i++) {
+ if( J == MBB.begin() ) return J;
+ J--;
+ }
+ return J;
}
unsigned
@@ -360,7 +356,7 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineInstrBuilder MIB;
//TODO: make a more accurate algorithm.
bool haveHBR = MBB.size()>8;
-
+
removeHBR(MBB);
MCSymbol *branchLabel = MBB.getParent()->getContext().CreateTempSymbol();
// Add a label just before the branch
@@ -382,7 +378,7 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MIB = BuildMI( MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
MIB.addSym(branchLabel);
MIB.addMBB(TBB);
- }
+ }
} else {
// Conditional branch
MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
@@ -392,7 +388,7 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MIB = BuildMI(MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
MIB.addSym(branchLabel);
MIB.addMBB(TBB);
- }
+ }
DEBUG(errs() << "Inserted one-way cond branch: ");
DEBUG((*MIB).dump());
@@ -410,7 +406,7 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MIB = BuildMI( MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
MIB.addSym(branchLabel);
MIB.addMBB(FBB);
- }
+ }
DEBUG(errs() << "Inserted conditional branch: ");
DEBUG((*MIB).dump());
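[Editor's note: the store/load hunks above map a register class to a D-form or X-form memory opcode through long if/else chains. An equivalent table-driven formulation is sketched below; it is not what the patch does, and the helper name is illustrative. The mapping and the final llvm_unreachable are taken from the storeRegToStackSlot hunk, so behavior is intended to be identical.]

// Sketch: select the (D-form, X-form) store opcode for a register class
// from a small table instead of an if/else chain.
static unsigned getStoreOpcode(const TargetRegisterClass *RC,
                               bool isValidFrameIdx) {
  struct Entry { const TargetRegisterClass *RC; unsigned DForm, XForm; };
  static const Entry Table[] = {
    { &SPU::GPRCRegClass,   SPU::STQDr128,  SPU::STQXr128  },
    { &SPU::R64CRegClass,   SPU::STQDr64,   SPU::STQXr64   },
    { &SPU::R64FPRegClass,  SPU::STQDr64,   SPU::STQXr64   },
    { &SPU::R32CRegClass,   SPU::STQDr32,   SPU::STQXr32   },
    { &SPU::R32FPRegClass,  SPU::STQDr32,   SPU::STQXr32   },
    { &SPU::R16CRegClass,   SPU::STQDr16,   SPU::STQXr16   },
    { &SPU::R8CRegClass,    SPU::STQDr8,    SPU::STQXr8    },
    { &SPU::VECREGRegClass, SPU::STQDv16i8, SPU::STQXv16i8 },
  };
  for (unsigned i = 0, e = sizeof(Table)/sizeof(Table[0]); i != e; ++i)
    if (Table[i].RC == RC)
      return isValidFrameIdx ? Table[i].DForm : Table[i].XForm;
  llvm_unreachable("Unknown regclass!");
}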
diff --git a/lib/Target/CellSPU/SPUInstrInfo.td b/lib/Target/CellSPU/SPUInstrInfo.td
index f76ebd7..117acd7 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.td
+++ b/lib/Target/CellSPU/SPUInstrInfo.td
@@ -3421,14 +3421,14 @@ let isCall = 1,
// Branch relative and set link: Used if we actually know that the target
// is within [-32768, 32767] bytes of the target
def BRSL:
- BranchSetLink<0b011001100, (outs), (ins relcalltarget:$func, variable_ops),
+ BranchSetLink<0b011001100, (outs), (ins relcalltarget:$func),
"brsl\t$$lr, $func",
[(SPUcall (SPUpcrel tglobaladdr:$func, 0))]>;
// Branch absolute and set link: Used if we actually know that the target
// is an absolute address
def BRASL:
- BranchSetLink<0b011001100, (outs), (ins calltarget:$func, variable_ops),
+ BranchSetLink<0b011001100, (outs), (ins calltarget:$func),
"brasl\t$$lr, $func",
[(SPUcall (SPUaform tglobaladdr:$func, 0))]>;
diff --git a/lib/Target/CellSPU/SPURegisterInfo.cpp b/lib/Target/CellSPU/SPURegisterInfo.cpp
index 1b2da5f..e6c872d 100644
--- a/lib/Target/CellSPU/SPURegisterInfo.cpp
+++ b/lib/Target/CellSPU/SPURegisterInfo.cpp
@@ -193,7 +193,8 @@ SPURegisterInfo::SPURegisterInfo(const SPUSubtarget &subtarget,
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
const TargetRegisterClass *
-SPURegisterInfo::getPointerRegClass(unsigned Kind) const {
+SPURegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
+ const {
return &SPU::R32CRegClass;
}
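[Editor's note: the hunk above follows the TargetRegisterInfo interface change that threads a MachineFunction into getPointerRegClass, so a target can pick the pointer class per function. A hedged sketch of a call site under the new signature; MF is assumed to be a MachineFunction already in scope.]

// Sketch: querying the pointer register class under the new signature.
const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
const TargetRegisterClass *PtrRC = TRI->getPointerRegClass(MF, /*Kind=*/0);
(void)PtrRC; // e.g. use it to create a virtual register holding an address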
diff --git a/lib/Target/CellSPU/SPURegisterInfo.h b/lib/Target/CellSPU/SPURegisterInfo.h
index e5ab224..e9f9aba 100644
--- a/lib/Target/CellSPU/SPURegisterInfo.h
+++ b/lib/Target/CellSPU/SPURegisterInfo.h
@@ -46,7 +46,7 @@ namespace llvm {
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
virtual const TargetRegisterClass *
- getPointerRegClass(unsigned Kind = 0) const;
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
/// After allocating this many registers, the allocator should feel
/// register pressure. The value is a somewhat random guess, based on the
@@ -63,6 +63,11 @@ namespace llvm {
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const
{ return true; }
+ //! Enable tracking of liveness after register allocation, since register
+ // scavenging is enabled.
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const
+ { return true; }
+
//! Return the reserved registers
BitVector getReservedRegs(const MachineFunction &MF) const;
diff --git a/lib/Target/CellSPU/SPUTargetMachine.cpp b/lib/Target/CellSPU/SPUTargetMachine.cpp
index 3b90261..54764f1 100644
--- a/lib/Target/CellSPU/SPUTargetMachine.cpp
+++ b/lib/Target/CellSPU/SPUTargetMachine.cpp
@@ -72,7 +72,7 @@ TargetPassConfig *SPUTargetMachine::createPassConfig(PassManagerBase &PM) {
bool SPUPassConfig::addInstSelector() {
// Install an instruction selector.
- PM->add(createSPUISelDag(getSPUTargetMachine()));
+ addPass(createSPUISelDag(getSPUTargetMachine()));
return false;
}
@@ -85,9 +85,9 @@ bool SPUPassConfig::addPreEmitPass() {
(BuilderFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
"createTCESchedulerPass");
if (schedulerCreator != NULL)
- PM->add(schedulerCreator("cellspu"));
+ addPass(schedulerCreator("cellspu"));
//align instructions with nops/lnops for dual issue
- PM->add(createSPUNopFillerPass(getSPUTargetMachine()));
+ addPass(createSPUNopFillerPass(getSPUTargetMachine()));
return true;
}
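[Editor's note: these hunks track the TargetPassConfig change where codegen passes are added through addPass() rather than through the raw PM pointer. A reconstructed sketch of the resulting pass-config shape is below; it is based on the two hunks plus the usual TargetPassConfig boilerplate of that era, not copied from the file.]

// Sketch: a TargetPassConfig subclass after the addPass() change. The base
// class owns the underlying pass manager, so overrides just hand it fully
// constructed passes.
class SPUPassConfig : public TargetPassConfig {
public:
  SPUPassConfig(SPUTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  SPUTargetMachine &getSPUTargetMachine() const {
    return getTM<SPUTargetMachine>();
  }

  virtual bool addInstSelector() {
    addPass(createSPUISelDag(getSPUTargetMachine()));      // instruction selection
    return false;
  }

  virtual bool addPreEmitPass() {
    addPass(createSPUNopFillerPass(getSPUTargetMachine())); // dual-issue nop filling
    return true;
  }
};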