Diffstat (limited to 'contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp')
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp  295
1 file changed, 223 insertions(+), 72 deletions(-)
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 4879d4e..5e70ffe 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -49,9 +49,9 @@ static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
return true;
}
-static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
- MVT &LocVT, CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags, CCState &State)
+static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
static const MCPhysReg RegList[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
@@ -77,6 +77,29 @@ static bool CC_Sparc_Assign_f64(unsigned &ValNo, MVT &ValVT,
return true;
}
+static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State)
+{
+ static const MCPhysReg RegList[] = {
+ SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
+ };
+
+ // Try to get first reg.
+ if (unsigned Reg = State.AllocateReg(RegList))
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ else
+ return false;
+
+ // Try to get second reg.
+ if (unsigned Reg = State.AllocateReg(RegList))
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ else
+ return false;
+
+ return true;
+}
+
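Background note: CC_Sparc_Assign_Split_64 and CC_Sparc_Assign_Ret_Split_64 are CCCustomFns, presumably wired up via CCCustom<> entries in SparcCallingConv.td (not shown in this diff). Mind the inverted conventions: a CCCustomFn returns true when it *handled* the value, while the top-level generated CCAssignFn returns true when it *failed*. A minimal sketch of the driving loop, simplified from CCState::AnalyzeReturn (names illustrative):

    static void analyzeReturnSketch(CCState &State,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    CCAssignFn Fn) {
      for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
        MVT VT = Outs[i].VT;
        // A top-level CCAssignFn returns true on *failure* to assign.
        if (Fn(i, VT, VT, CCValAssign::Full, Outs[i].Flags, State))
          report_fatal_error("return value type not handled");
      }
    }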
// Allocate a full-sized argument for the 64-bit ABI.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
MVT &LocVT, CCValAssign::LocInfo &LocInfo,
@@ -202,12 +225,34 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain,
RetOps.push_back(SDValue());
// Copy the result values into the output registers.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ for (unsigned i = 0, realRVLocIdx = 0;
+ i != RVLocs.size();
+ ++i, ++realRVLocIdx) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(),
- OutVals[i], Flag);
+ SDValue Arg = OutVals[realRVLocIdx];
+
+ if (VA.needsCustom()) {
+ assert(VA.getLocVT() == MVT::v2i32);
+ // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
+ // happen by default if this wasn't a legal type)
+
+ SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
+ Arg,
+ DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
+ SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
+ Arg,
+ DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
+
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ VA = RVLocs[++i]; // skip ahead to next loc
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
+ Flag);
+ } else
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
// Guarantee that all emitted copies are stuck together with flags.
Flag = Chain.getValue(1);
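With this hunk, a v2i32 return value (typically an i64 that type legalization split on 32-bit SPARC) is returned the same way an f64 is: each lane is extracted and copied into the next register the calling convention allocated, with glue keeping the copies attached to the return. Schematically, for the first two return locations (callee-side register names; through the register window the caller reads them as %o0/%o1):

    //   %i0 <- extract_vector_elt %v, 0   ; Part0
    //   %i1 <- extract_vector_elt %v, 1   ; Part1
    //   ret, glued to both copies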
@@ -355,6 +400,7 @@ LowerFormalArguments_32(SDValue Chain,
CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
const unsigned StackOffset = 92;
+ bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
unsigned InIdx = 0;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
@@ -375,7 +421,8 @@ LowerFormalArguments_32(SDValue Chain,
if (VA.isRegLoc()) {
if (VA.needsCustom()) {
- assert(VA.getLocVT() == MVT::f64);
+ assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
+
unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
@@ -396,9 +443,13 @@ LowerFormalArguments_32(SDValue Chain,
&SP::IntRegsRegClass);
LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
}
+
+ if (IsLittleEndian)
+ std::swap(LoVal, HiVal);
+
SDValue WholeValue =
DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
- WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
+ WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
InVals.push_back(WholeValue);
continue;
}
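BUILD_PAIR(Lo, Hi) produces the wide value Hi:Lo, while the ABI fixes which physical register of the pair arrives first; which of the two holds the low word therefore depends on byte order, hence the std::swap calls this patch adds for little-endian targets. A tiny self-contained illustration (hypothetical values, plain C++ rather than LLVM code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Words arriving in the first and second register of the pair.
      uint64_t First = 0x01020304, Second = 0x05060708;
      // Big-endian SPARC: the first register holds the HIGH word,
      // so BUILD_PAIR(Lo = Second, Hi = First):
      uint64_t BigEndian = (First << 32) | Second;
      // Little-endian: the first register holds the LOW word, so the
      // halves must be swapped before building the pair:
      uint64_t LittleEndian = (Second << 32) | First;
      assert(BigEndian == 0x0102030405060708ULL);
      assert(LittleEndian == 0x0506070801020304ULL);
      return 0;
    }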
@@ -422,7 +473,7 @@ LowerFormalArguments_32(SDValue Chain,
auto PtrVT = getPointerTy(DAG.getDataLayout());
if (VA.needsCustom()) {
- assert(VA.getValVT() == MVT::f64);
+ assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
// If it is double-word aligned, just load.
if (Offset % 8 == 0) {
int FI = MF.getFrameInfo()->CreateFixedObject(8,
@@ -452,9 +503,12 @@ LowerFormalArguments_32(SDValue Chain,
MachinePointerInfo(),
false, false, false, 0);
+ if (IsLittleEndian)
+ std::swap(LoVal, HiVal);
+
SDValue WholeValue =
DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
- WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
+ WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
InVals.push_back(WholeValue);
continue;
}
@@ -468,16 +522,12 @@ LowerFormalArguments_32(SDValue Chain,
Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
MachinePointerInfo(),
false, false, false, 0);
+ } else if (VA.getValVT() == MVT::f128) {
+ report_fatal_error("SPARCv8 does not handle f128 in calls; "
+ "pass indirectly");
} else {
- ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
- // Sparc is big endian, so add an offset based on the ObjectVT.
- unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8);
- FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
- DAG.getConstant(Offset, dl, MVT::i32));
- Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
- MachinePointerInfo(),
- VA.getValVT(), false, false, false,0);
- Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load);
+ // We shouldn't see any other value types here.
+ llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
}
InVals.push_back(Load);
}
@@ -612,7 +662,7 @@ LowerFormalArguments_64(SDValue Chain,
InVals.push_back(DAG.getLoad(
VA.getValVT(), DL, Chain,
DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
- MachinePointerInfo::getFixedStack(FI), false, false, false, 0));
+ MachinePointerInfo::getFixedStack(MF, FI), false, false, false, 0));
}
if (!IsVarArg)
@@ -640,9 +690,9 @@ LowerFormalArguments_64(SDValue Chain,
SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true);
auto PtrVT = getPointerTy(MF.getDataLayout());
- OutChains.push_back(
- DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
- MachinePointerInfo::getFixedStack(FI), false, false, 0));
+ OutChains.push_back(DAG.getStore(
+ Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
+ MachinePointerInfo::getFixedStack(MF, FI), false, false, 0));
}
if (!OutChains.empty())
@@ -788,7 +838,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
}
if (VA.needsCustom()) {
- assert(VA.getLocVT() == MVT::f64);
+ assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
if (VA.isMemLoc()) {
unsigned Offset = VA.getLocMemOffset() + StackOffset;
@@ -804,49 +854,53 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
}
}
- SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
- Arg, StackPtr, MachinePointerInfo(),
- false, false, 0);
- // Sparc is big-endian, so the high part comes first.
- SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
- MachinePointerInfo(), false, false, false, 0);
- // Increment the pointer to the other half.
- StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
- DAG.getIntPtrConstant(4, dl));
- // Load the low part.
- SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr,
- MachinePointerInfo(), false, false, false, 0);
+ if (VA.getLocVT() == MVT::f64) {
+ // Move the float value from the float registers into the
+ // integer registers.
+
+ // TODO: The f64 -> v2i32 conversion is super-inefficient for
+ // constants: it sticks them in the constant pool, then loads
+ // to a fp register, then stores to temp memory, then loads to
+ // integer registers.
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
+ }
+
+ SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+ Arg,
+ DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
+ SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+ Arg,
+ DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
if (VA.isRegLoc()) {
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), Hi));
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
assert(i+1 != e);
CCValAssign &NextVA = ArgLocs[++i];
if (NextVA.isRegLoc()) {
- RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Lo));
+ RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
} else {
- // Store the low part in stack.
+ // Store the second part in stack.
unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Part1, PtrOff,
MachinePointerInfo(),
false, false, 0));
}
} else {
unsigned Offset = VA.getLocMemOffset() + StackOffset;
- // Store the high part.
+ // Store the first part.
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff,
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Part0, PtrOff,
MachinePointerInfo(),
false, false, 0));
- // Store the low part.
+ // Store the second part.
PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Lo, PtrOff,
+ MemOpChains.push_back(DAG.getStore(Chain, dl, Part1, PtrOff,
MachinePointerInfo(),
false, false, 0));
}
@@ -990,8 +1044,8 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
if (!CalleeFn)
return 0;
- assert(CalleeFn->hasStructRetAttr() &&
- "Callee does not have the StructRet attribute.");
+ // It would be nice to check for the sret attribute on CalleeFn here,
+ // but since it is not part of the function type, any check will misfire.
PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
Type *ElementTy = Ty->getElementType();
@@ -1370,15 +1424,60 @@ static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
const SparcSubtarget &STI)
: TargetLowering(TM), Subtarget(&STI) {
- auto &DL = *TM.getDataLayout();
+ MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
+
+ // Instructions which use registers as conditionals examine all the
+ // bits (as does the pseudo SELECT_CC expansion). I don't think it
+ // matters much whether it's ZeroOrOneBooleanContent, or
+ // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
+ // former.
+ setBooleanContents(ZeroOrOneBooleanContent);
+ setBooleanVectorContents(ZeroOrOneBooleanContent);
// Set up the register classes.
addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
- if (Subtarget->is64Bit())
+ if (Subtarget->is64Bit()) {
addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
+ } else {
+ // On 32-bit sparc, we also define a register class for pairs of
+ // 32-bit registers. This is modeled in LLVM as a 2-vector of i32.
+ addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
+
+ // ...but almost all operations must be expanded, so set that as
+ // the default.
+ for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
+ setOperationAction(Op, MVT::v2i32, Expand);
+ }
+ // Truncating/extending stores/loads are also not supported.
+ for (MVT VT : MVT::integer_vector_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
+
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
+
+ setTruncStoreAction(VT, MVT::v2i32, Expand);
+ setTruncStoreAction(MVT::v2i32, VT, Expand);
+ }
+ // However, load and store *are* legal.
+ setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
+ setOperationAction(ISD::STORE, MVT::v2i32, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);
+
+ // And we need to turn i64 loads/stores into v2i32 loads/stores.
+ setOperationAction(ISD::LOAD, MVT::i64, Custom);
+ setOperationAction(ISD::STORE, MVT::i64, Custom);
+
+ // Sadly, this doesn't work:
+ // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
+ // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
+ }
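The resulting legality table can be read back through the usual queries; a sketch assuming the TargetLowering API of this LLVM version:

    // Somewhere with a SelectionDAG in scope, after the constructor ran:
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    // Arbitrary v2i32 arithmetic is scalarized by the legalizer...
    assert(TLI.getOperationAction(ISD::ADD, MVT::v2i32) == TargetLowering::Expand);
    // ...whole-register v2i32 moves map onto the ldd/std pair instructions...
    assert(TLI.getOperationAction(ISD::LOAD, MVT::v2i32) == TargetLowering::Legal);
    // ...and i64 stores are diverted into LowerSTORE further down.
    assert(TLI.getOperationAction(ISD::STORE, MVT::i64) == TargetLowering::Custom);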
// Turn FP extload into load/fextend
for (MVT VT : MVT::fp_valuetypes()) {
@@ -1396,10 +1495,10 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
setTruncStoreAction(MVT::f128, MVT::f64, Expand);
// Custom legalize GlobalAddress nodes into LO/HI parts.
- setOperationAction(ISD::GlobalAddress, getPointerTy(DL), Custom);
- setOperationAction(ISD::GlobalTLSAddress, getPointerTy(DL), Custom);
- setOperationAction(ISD::ConstantPool, getPointerTy(DL), Custom);
- setOperationAction(ISD::BlockAddress, getPointerTy(DL), Custom);
+ setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
+ setOperationAction(ISD::ConstantPool, PtrVT, Custom);
+ setOperationAction(ISD::BlockAddress, PtrVT, Custom);
// Sparc doesn't have sext_inreg, replace them with shl/sra
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
@@ -1579,9 +1678,6 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
- setExceptionPointerRegister(SP::I0);
- setExceptionSelectorRegister(SP::I1);
-
setStackPointerRegisterToSaveRestore(SP::O6);
setOperationAction(ISD::CTPOP, MVT::i32,
@@ -1744,18 +1840,15 @@ void SparcTargetLowering::computeKnownBitsForTargetNode
// set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition.
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
ISD::CondCode CC, unsigned &SPCC) {
- if (isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->isNullValue() &&
+ if (isNullConstant(RHS) &&
CC == ISD::SETNE &&
(((LHS.getOpcode() == SPISD::SELECT_ICC ||
LHS.getOpcode() == SPISD::SELECT_XCC) &&
LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
(LHS.getOpcode() == SPISD::SELECT_FCC &&
LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
- isa<ConstantSDNode>(LHS.getOperand(0)) &&
- isa<ConstantSDNode>(LHS.getOperand(1)) &&
- cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
- cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
+ isOneConstant(LHS.getOperand(0)) &&
+ isNullConstant(LHS.getOperand(1))) {
SDValue CMPCC = LHS.getOperand(3);
SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
LHS = CMPCC.getOperand(0);
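isNullConstant and isOneConstant are SelectionDAG helpers that collapse the isa/cast pairs deleted above into a single null-safe call; approximately (a sketch of their intent, not the verbatim upstream definitions):

    bool isNullConstant(SDValue V) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
      return C && C->isNullValue();
    }
    bool isOneConstant(SDValue V) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
      return C && C->isOne();
    }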
@@ -1821,7 +1914,8 @@ SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setHasCalls(true);
return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
- MachinePointerInfo::getGOT(), false, false, false, 0);
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()),
+ false, false, false, 0);
}
// This is one of the absolute code models.
@@ -1872,6 +1966,9 @@ SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
+ if (DAG.getTarget().Options.EmulatedTLS)
+ return LowerToTLSEmulatedModel(GA, DAG);
+
SDLoc DL(GA);
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
@@ -2601,6 +2698,17 @@ static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
return DAG.getMergeValues(Ops, dl);
}
+static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
+{
+ LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
+
+ EVT MemVT = LdNode->getMemoryVT();
+ if (MemVT == MVT::f128)
+ return LowerF128Load(Op, DAG);
+
+ return Op;
+}
+
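Returning Op unchanged tells the operation legalizer the load is fine as it stands; only f128 needs rewriting here. A dispatch summary for ISD::LOAD on 32-bit SPARC after this patch (sketch):

    //   f128 load  -> LowerF128Load: split into two f64 loads + merge
    //   v2i32 load -> Legal, matched directly to the ldd instruction
    //   i64 load   -> never reaches this hook: i64 is an illegal result
    //                 type on SPARC32, so ReplaceNodeResults (at the end
    //                 of this patch) rewrites it during type legalization.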
// Lower a f128 store into two f64 stores.
static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Op);
@@ -2645,6 +2753,29 @@ static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
+static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
+{
+ SDLoc dl(Op);
+ StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
+
+ EVT MemVT = St->getMemoryVT();
+ if (MemVT == MVT::f128)
+ return LowerF128Store(Op, DAG);
+
+ if (MemVT == MVT::i64) {
+ // Custom handling for i64 stores: turn it into a bitcast and a
+ // v2i32 store.
+ SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
+ SDValue Chain = DAG.getStore(
+ St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
+ St->isVolatile(), St->isNonTemporal(), St->getAlignment(),
+ St->getAAInfo());
+ return Chain;
+ }
+
+ return SDValue();
+}
+
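The i64 branch above rewrites the store in place; schematically (abbreviated, hypothetical DAG dump):

    //   before: t2: ch = store<8 bytes> t0, i64 %v, %ptr
    //   after:  t3: v2i32 = bitcast %v
    //           t4: ch = store<8 bytes> t0, t3, %ptr
    // Since v2i32 stores were marked Legal in the constructor, t4 is then
    // matched to the doubleword store (std) on the IntPair register class.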
static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
&& "invalid opcode");
@@ -2752,7 +2883,7 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
SDValue MulResult = TLI.makeLibCall(DAG,
RTLIB::MUL_I128, WideVT,
- Args, 4, isSigned, dl).first;
+ Args, isSigned, dl).first;
SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
MulResult, DAG.getIntPtrConstant(0, dl));
SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
@@ -2783,7 +2914,6 @@ static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
return SDValue();
}
-
SDValue SparcTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
@@ -2818,8 +2948,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
Subtarget);
- case ISD::LOAD: return LowerF128Load(Op, DAG);
- case ISD::STORE: return LowerF128Store(Op, DAG);
+ case ISD::LOAD: return LowerLOAD(Op, DAG);
+ case ISD::STORE: return LowerSTORE(Op, DAG);
case ISD::FADD: return LowerF128Op(Op, DAG,
getLibcallName(RTLIB::ADD_F128), 2);
case ISD::FSUB: return LowerF128Op(Op, DAG,
@@ -2921,8 +3051,7 @@ SparcTargetLowering::expandSelectCC(MachineInstr *MI,
// to set, the condition code register to branch on, the true/false values to
// select between, and a branch opcode to use.
const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = BB;
- ++It;
+ MachineFunction::iterator It = ++BB->getIterator();
// thisMBB:
// ...
@@ -3007,7 +3136,7 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
.addReg(AddrReg).addImm(0);
// Split the basic block MBB before MI and insert the loop block in the hole.
- MachineFunction::iterator MFI = MBB;
+ MachineFunction::iterator MFI = MBB->getIterator();
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
MachineFunction *MF = MBB->getParent();
MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
@@ -3149,9 +3278,12 @@ SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
- return std::make_pair(0U, &SP::IntRegsRegClass);
+ if (VT == MVT::v2i32)
+ return std::make_pair(0U, &SP::IntPairRegClass);
+ else
+ return std::make_pair(0U, &SP::IntRegsRegClass);
}
- } else if (!Constraint.empty() && Constraint.size() <= 5
+ } else if (!Constraint.empty() && Constraint.size() <= 5
&& Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
// constraint = '{r<d>}'
// Remove the braces from around the name.
@@ -3227,5 +3359,24 @@ void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
getLibcallName(libCall),
1));
return;
+ case ISD::LOAD: {
+ LoadSDNode *Ld = cast<LoadSDNode>(N);
+ // Custom handling only for i64: turn i64 load into a v2i32 load,
+ // and a bitcast.
+ if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
+ return;
+
+ SDLoc dl(N);
+ SDValue LoadRes = DAG.getExtLoad(
+ Ld->getExtensionType(), dl, MVT::v2i32,
+ Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
+ MVT::v2i32, Ld->isVolatile(), Ld->isNonTemporal(),
+ Ld->isInvariant(), Ld->getAlignment(), Ld->getAAInfo());
+
+ SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
+ Results.push_back(Res);
+ Results.push_back(LoadRes.getValue(1));
+ return;
+ }
}
}
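ReplaceNodeResults must push one replacement per result of the original node, in the original order; for a load that is the data value followed by the chain, which is why the i64 bitcast is pushed before LoadRes.getValue(1) (a paraphrase of the TargetLowering contract, summarized):

    //   N (i64 load) result #0: i64 data -> bitcast of the v2i32 load
    //   N (i64 load) result #1: chain    -> chain of the v2i32 load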