Diffstat (limited to 'lib/CodeGen/SelectionDAG')
 lib/CodeGen/SelectionDAG/CMakeLists.txt            |    6
 lib/CodeGen/SelectionDAG/CallingConvLower.cpp      |  121
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp           |  654
 lib/CodeGen/SelectionDAG/FastISel.cpp              |  183
 lib/CodeGen/SelectionDAG/InstrEmitter.cpp          |  693
 lib/CodeGen/SelectionDAG/InstrEmitter.h            |  119
 lib/CodeGen/SelectionDAG/LegalizeDAG.cpp           |  448
 lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp    |  154
 lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp  |  318
 lib/CodeGen/SelectionDAG/LegalizeTypes.cpp         |  130
 lib/CodeGen/SelectionDAG/LegalizeTypes.h           |   45
 lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp  |   68
 lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp     |   24
 lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp   |  589
 lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp       |   81
 lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp       |   26
 lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp     |   73
 lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp    |  113
 lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h      |   69
 lib/CodeGen/SelectionDAG/SelectionDAG.cpp          | 1245
 lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp     | 1269
 lib/CodeGen/SelectionDAG/SelectionDAGBuild.h       |   34
 lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp      |  423
 lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp   |   38
 lib/CodeGen/SelectionDAG/TargetLowering.cpp        |  836
 25 files changed, 4459 insertions(+), 3300 deletions(-)
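
The bulk of the churn in the diffs below is mechanical: the MVT value-type class used throughout SelectionDAG is replaced by EVT, and the type factory helpers now take the LLVMContext that owns extended types. A minimal before/after sketch of the pattern (the variable names are illustrative; the calls are the ones that appear repeatedly in the patch):

    // Old API: value types were built from a bit width alone.
    MVT HalfVT = MVT::getIntegerVT(NumBits);
    const Type *Ty = VT.getTypeForMVT();

    // New API: extended types are owned by an LLVMContext, so the helpers
    // take the context (DAG.getContext() inside the DAG combiner).
    EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), NumBits);
    EVT VecVT  = EVT::getVectorVT(*DAG.getContext(), HalfVT, NumElts);
    const Type *Ty = VT.getTypeForEVT(*DAG.getContext());
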
diff --git a/lib/CodeGen/SelectionDAG/CMakeLists.txt b/lib/CodeGen/SelectionDAG/CMakeLists.txt
index 4ffe88f..c766859 100644
--- a/lib/CodeGen/SelectionDAG/CMakeLists.txt
+++ b/lib/CodeGen/SelectionDAG/CMakeLists.txt
@@ -2,6 +2,7 @@ add_llvm_library(LLVMSelectionDAG
CallingConvLower.cpp
DAGCombiner.cpp
FastISel.cpp
+ InstrEmitter.cpp
LegalizeDAG.cpp
LegalizeFloatTypes.cpp
LegalizeIntegerTypes.cpp
@@ -9,13 +10,12 @@ add_llvm_library(LLVMSelectionDAG
LegalizeTypesGeneric.cpp
LegalizeVectorOps.cpp
LegalizeVectorTypes.cpp
- ScheduleDAGSDNodes.cpp
- ScheduleDAGSDNodesEmit.cpp
ScheduleDAGFast.cpp
ScheduleDAGList.cpp
ScheduleDAGRRList.cpp
- SelectionDAGBuild.cpp
+ ScheduleDAGSDNodes.cpp
SelectionDAG.cpp
+ SelectionDAGBuild.cpp
SelectionDAGISel.cpp
SelectionDAGPrinter.cpp
TargetLowering.cpp
diff --git a/lib/CodeGen/SelectionDAG/CallingConvLower.cpp b/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
index 7cd2b73..fbe40b6 100644
--- a/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
+++ b/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
@@ -13,15 +13,17 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
-CCState::CCState(unsigned CC, bool isVarArg, const TargetMachine &tm,
- SmallVector<CCValAssign, 16> &locs)
+CCState::CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &tm,
+ SmallVector<CCValAssign, 16> &locs, LLVMContext &C)
: CallingConv(CC), IsVarArg(isVarArg), TM(tm),
- TRI(*TM.getRegisterInfo()), Locs(locs) {
+ TRI(*TM.getRegisterInfo()), Locs(locs), Context(C) {
// No stack is used.
StackOffset = 0;
@@ -31,8 +33,8 @@ CCState::CCState(unsigned CC, bool isVarArg, const TargetMachine &tm,
// HandleByVal - Allocate a stack slot large enough to pass an argument by
// value. The size and alignment information of the argument is encoded in its
// parameter attribute.
-void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
- MVT LocVT, CCValAssign::LocInfo LocInfo,
+void CCState::HandleByVal(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
int MinSize, int MinAlign,
ISD::ArgFlagsTy ArgFlags) {
unsigned Align = ArgFlags.getByValAlign();
@@ -55,94 +57,107 @@ void CCState::MarkAllocated(unsigned Reg) {
UsedRegs[Reg/32] |= 1 << (Reg&31);
}
-/// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
+/// AnalyzeFormalArguments - Analyze an array of argument values,
/// incorporating info about the formals into this state.
-void CCState::AnalyzeFormalArguments(SDNode *TheArgs, CCAssignFn Fn) {
- unsigned NumArgs = TheArgs->getNumValues()-1;
-
+void
+CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+ CCAssignFn Fn) {
+ unsigned NumArgs = Ins.size();
+
for (unsigned i = 0; i != NumArgs; ++i) {
- MVT ArgVT = TheArgs->getValueType(i);
- ISD::ArgFlagsTy ArgFlags =
- cast<ARG_FLAGSSDNode>(TheArgs->getOperand(3+i))->getArgFlags();
+ EVT ArgVT = Ins[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
- cerr << "Formal argument #" << i << " has unhandled type "
- << ArgVT.getMVTString() << "\n";
- abort();
+#ifndef NDEBUG
+ errs() << "Formal argument #" << i << " has unhandled type "
+ << ArgVT.getEVTString();
+#endif
+ llvm_unreachable(0);
}
}
}
-/// AnalyzeReturn - Analyze the returned values of an ISD::RET node,
+/// AnalyzeReturn - Analyze the returned values of a return,
/// incorporating info about the result values into this state.
-void CCState::AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn) {
+void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ CCAssignFn Fn) {
// Determine which register each value should be copied into.
- for (unsigned i = 0, e = TheRet->getNumOperands() / 2; i != e; ++i) {
- MVT VT = TheRet->getOperand(i*2+1).getValueType();
- ISD::ArgFlagsTy ArgFlags =
- cast<ARG_FLAGSSDNode>(TheRet->getOperand(i*2+2))->getArgFlags();
- if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)){
- cerr << "Return operand #" << i << " has unhandled type "
- << VT.getMVTString() << "\n";
- abort();
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ EVT VT = Outs[i].Val.getValueType();
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
+#ifndef NDEBUG
+ errs() << "Return operand #" << i << " has unhandled type "
+ << VT.getEVTString();
+#endif
+ llvm_unreachable(0);
}
}
}
-/// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
-/// about the passed values into this state.
-void CCState::AnalyzeCallOperands(CallSDNode *TheCall, CCAssignFn Fn) {
- unsigned NumOps = TheCall->getNumArgs();
+/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
+/// incorporating info about the passed values into this state.
+void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ CCAssignFn Fn) {
+ unsigned NumOps = Outs.size();
for (unsigned i = 0; i != NumOps; ++i) {
- MVT ArgVT = TheCall->getArg(i).getValueType();
- ISD::ArgFlagsTy ArgFlags = TheCall->getArgFlags(i);
+ EVT ArgVT = Outs[i].Val.getValueType();
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
- cerr << "Call operand #" << i << " has unhandled type "
- << ArgVT.getMVTString() << "\n";
- abort();
+#ifndef NDEBUG
+ errs() << "Call operand #" << i << " has unhandled type "
+ << ArgVT.getEVTString();
+#endif
+ llvm_unreachable(0);
}
}
}
/// AnalyzeCallOperands - Same as above except it takes vectors of types
/// and argument flags.
-void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
+void CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn) {
unsigned NumOps = ArgVTs.size();
for (unsigned i = 0; i != NumOps; ++i) {
- MVT ArgVT = ArgVTs[i];
+ EVT ArgVT = ArgVTs[i];
ISD::ArgFlagsTy ArgFlags = Flags[i];
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
- cerr << "Call operand #" << i << " has unhandled type "
- << ArgVT.getMVTString() << "\n";
- abort();
+#ifndef NDEBUG
+ errs() << "Call operand #" << i << " has unhandled type "
+ << ArgVT.getEVTString();
+#endif
+ llvm_unreachable(0);
}
}
}
-/// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
+/// AnalyzeCallResult - Analyze the return values of a call,
/// incorporating info about the passed values into this state.
-void CCState::AnalyzeCallResult(CallSDNode *TheCall, CCAssignFn Fn) {
- for (unsigned i = 0, e = TheCall->getNumRetVals(); i != e; ++i) {
- MVT VT = TheCall->getRetValType(i);
- ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
- if (TheCall->isInreg())
- Flags.setInReg();
+void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+ CCAssignFn Fn) {
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ EVT VT = Ins[i].VT;
+ ISD::ArgFlagsTy Flags = Ins[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
- cerr << "Call result #" << i << " has unhandled type "
- << VT.getMVTString() << "\n";
- abort();
+#ifndef NDEBUG
+ errs() << "Call result #" << i << " has unhandled type "
+ << VT.getEVTString();
+#endif
+ llvm_unreachable(0);
}
}
}
/// AnalyzeCallResult - Same as above except it's specialized for calls which
/// produce a single value.
-void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
+void CCState::AnalyzeCallResult(EVT VT, CCAssignFn Fn) {
if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
- cerr << "Call result has unhandled type "
- << VT.getMVTString() << "\n";
- abort();
+#ifndef NDEBUG
+ errs() << "Call result has unhandled type "
+ << VT.getEVTString();
+#endif
+ llvm_unreachable(0);
}
}
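
The CallingConvLower changes above replace the SDNode-walking analysis entry points with ones that consume ISD::InputArg / ISD::OutputArg vectors, and CCState now records the calling convention as a CallingConv::ID together with an LLVMContext. A rough sketch of how a target's LowerFormalArguments hook would drive the reworked interface (CC_MyTarget is a placeholder for a real CCAssignFn; CallConv, isVarArg, Ins, and DAG are the hook's parameters):

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                   ArgLocs, *DAG.getContext());
    // Each ISD::InputArg already carries its value type and argument flags,
    // so no ARG_FLAGS nodes need to be fished out of the DAG any more.
    CCInfo.AnalyzeFormalArguments(Ins, CC_MyTarget);

    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      // Lower each argument from VA.getLocReg() or its assigned stack slot.
    }
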
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 609ec82..1ed3082 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -19,6 +19,7 @@
#define DEBUG_TYPE "dagcombine"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
@@ -33,7 +34,9 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <set>
using namespace llvm;
@@ -213,12 +216,12 @@ namespace {
SDValue SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2,
SDValue N3, ISD::CondCode CC,
bool NotExtCompare = false);
- SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
+ SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
DebugLoc DL, bool foldBooleans = true);
SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp);
- SDValue CombineConsecutiveLoads(SDNode *N, MVT VT);
- SDValue ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, MVT);
+ SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
+ SDValue ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, EVT);
SDValue BuildSDIV(SDNode *N);
SDValue BuildUDIV(SDNode *N);
SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
@@ -236,14 +239,17 @@ namespace {
/// overlap.
bool isAlias(SDValue Ptr1, int64_t Size1,
const Value *SrcValue1, int SrcValueOffset1,
+ unsigned SrcValueAlign1,
SDValue Ptr2, int64_t Size2,
- const Value *SrcValue2, int SrcValueOffset2) const;
+ const Value *SrcValue2, int SrcValueOffset2,
+ unsigned SrcValueAlign2) const;
/// FindAliasInfo - Extracts the relevant alias information from the memory
/// node. Returns true if the operand was a load.
bool FindAliasInfo(SDNode *N,
SDValue &Ptr, int64_t &Size,
- const Value *&SrcValue, int &SrcValueOffset) const;
+ const Value *&SrcValue, int &SrcValueOffset,
+ unsigned &SrcValueAlignment) const;
/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
/// looking for a better chain (aliasing node.)
@@ -251,7 +257,7 @@ namespace {
/// getShiftAmountTy - Returns a type large enough to hold any valid
/// shift amount - before type legalization these can be huge.
- MVT getShiftAmountTy() {
+ EVT getShiftAmountTy() {
return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
}
@@ -392,7 +398,7 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
switch (Op.getOpcode()) {
- default: assert(0 && "Unknown code");
+ default: llvm_unreachable("Unknown code");
case ISD::ConstantFP: {
APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
V.changeSign();
@@ -495,7 +501,7 @@ static bool isOneUseSetCC(SDValue N) {
SDValue DAGCombiner::ReassociateOps(unsigned Opc, DebugLoc DL,
SDValue N0, SDValue N1) {
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) {
if (isa<ConstantSDNode>(N1)) {
// reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
@@ -537,10 +543,12 @@ SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
bool AddTo) {
assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
++NodesCombined;
- DOUT << "\nReplacing.1 "; DEBUG(N->dump(&DAG));
- DOUT << "\nWith: "; DEBUG(To[0].getNode()->dump(&DAG));
- DOUT << " and " << NumTo-1 << " other values\n";
- DEBUG(for (unsigned i = 0, e = NumTo; i != e; ++i)
+ DEBUG(errs() << "\nReplacing.1 ";
+ N->dump(&DAG);
+ errs() << "\nWith: ";
+ To[0].getNode()->dump(&DAG);
+ errs() << " and " << NumTo-1 << " other values\n";
+ for (unsigned i = 0, e = NumTo; i != e; ++i)
assert(N->getValueType(i) == To[i].getValueType() &&
"Cannot combine value to value of different type!"));
WorkListRemover DeadNodes(*this);
@@ -612,9 +620,11 @@ bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
// Replace the old value with the new one.
++NodesCombined;
- DOUT << "\nReplacing.2 "; DEBUG(TLO.Old.getNode()->dump(&DAG));
- DOUT << "\nWith: "; DEBUG(TLO.New.getNode()->dump(&DAG));
- DOUT << '\n';
+ DEBUG(errs() << "\nReplacing.2 ";
+ TLO.Old.getNode()->dump(&DAG);
+ errs() << "\nWith: ";
+ TLO.New.getNode()->dump(&DAG);
+ errs() << '\n');
CommitTargetLoweringOpt(TLO);
return true;
@@ -680,9 +690,11 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned new node!");
- DOUT << "\nReplacing.3 "; DEBUG(N->dump(&DAG));
- DOUT << "\nWith: "; DEBUG(RV.getNode()->dump(&DAG));
- DOUT << '\n';
+ DEBUG(errs() << "\nReplacing.3 ";
+ N->dump(&DAG);
+ errs() << "\nWith: ";
+ RV.getNode()->dump(&DAG);
+ errs() << '\n');
WorkListRemover DeadNodes(*this);
if (N->getNumValues() == RV.getNode()->getNumValues())
DAG.ReplaceAllUsesWith(N, RV.getNode(), &DeadNodes);
@@ -800,7 +812,7 @@ SDValue DAGCombiner::combine(SDNode *N) {
// Expose the DAG combiner to the target combiner impls.
TargetLowering::DAGCombinerInfo
- DagCombineInfo(DAG, Level == Unrestricted, false, this);
+ DagCombineInfo(DAG, !LegalTypes, !LegalOperations, false, this);
RV = TLI.PerformDAGCombine(N, DagCombineInfo);
}
@@ -877,7 +889,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
break;
case ISD::TokenFactor:
- if ((CombinerAA || Op.hasOneUse()) &&
+ if (Op.hasOneUse() &&
std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
// Queue up for processing.
TFs.push_back(Op.getNode());
@@ -898,7 +910,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
}
}
}
-
+
SDValue Result;
// If we've change things around then replace token factor.
@@ -922,9 +934,14 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
WorkListRemover DeadNodes(*this);
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i),
- &DeadNodes);
+ // Replacing results may cause a different MERGE_VALUES to suddenly
+ // be CSE'd with N, and carry its uses with it. Iterate until no
+ // uses remain, to ensure that the node can be safely deleted.
+ do {
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i),
+ &DeadNodes);
+ } while (!N->use_empty());
removeFromWorkList(N);
DAG.DeleteNode(N);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
@@ -933,7 +950,7 @@ SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
static
SDValue combineShlAddConstant(DebugLoc DL, SDValue N0, SDValue N1,
SelectionDAG &DAG) {
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);
@@ -957,7 +974,7 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector()) {
@@ -1080,7 +1097,7 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
// If the flag result is dead, turn this into an ADD.
if (N->hasNUsesOfValue(0, 1))
@@ -1142,7 +1159,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector()) {
@@ -1215,7 +1232,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector()) {
@@ -1308,7 +1325,7 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold vector ops
if (VT.isVector()) {
@@ -1395,7 +1412,7 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold vector ops
if (VT.isVector()) {
@@ -1415,7 +1432,7 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
if (N1.getOpcode() == ISD::SHL) {
if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
if (SHC->getAPIntValue().isPowerOf2()) {
- MVT ADDVT = N1.getOperand(1).getValueType();
+ EVT ADDVT = N1.getOperand(1).getValueType();
SDValue Add = DAG.getNode(ISD::ADD, N->getDebugLoc(), ADDVT,
N1.getOperand(1),
DAG.getConstant(SHC->getAPIntValue()
@@ -1447,7 +1464,7 @@ SDValue DAGCombiner::visitSREM(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (srem c1, c2) -> c1%c2
if (N0C && N1C && !N1C->isNullValue())
@@ -1489,7 +1506,7 @@ SDValue DAGCombiner::visitUREM(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (urem c1, c2) -> c1%c2
if (N0C && N1C && !N1C->isNullValue())
@@ -1541,7 +1558,7 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (mulhs x, 0) -> 0
if (N1C && N1C->isNullValue())
@@ -1562,7 +1579,7 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (mulhu x, 0) -> 0
if (N1C && N1C->isNullValue())
@@ -1665,7 +1682,7 @@ SDValue DAGCombiner::visitUDIVREM(SDNode *N) {
/// two operands of the same opcode, try to simplify it.
SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
// For each of OP in AND/OR/XOR:
@@ -1677,7 +1694,9 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
N0.getOpcode() == ISD::SIGN_EXTEND ||
(N0.getOpcode() == ISD::TRUNCATE &&
!TLI.isTruncateFree(N0.getOperand(0).getValueType(), VT))) &&
- N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
+ N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() &&
+ (!LegalOperations ||
+ TLI.isOperationLegal(N->getOpcode(), N0.getOperand(0).getValueType()))) {
SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
N0.getOperand(0).getValueType(),
N0.getOperand(0), N1.getOperand(0));
@@ -1709,7 +1728,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
SDValue LL, LR, RL, RR, CC0, CC1;
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N1.getValueType();
+ EVT VT = N1.getValueType();
unsigned BitWidth = VT.getSizeInBits();
// fold vector ops
@@ -1820,18 +1839,18 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// fold (zext_inreg (extload x)) -> (zextload x)
if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- MVT EVT = LN0->getMemoryVT();
+ EVT MemVT = LN0->getMemoryVT();
// If we zero all the possible extended bits, then we can turn this into
// a zextload if we are running before legalize or the operation is legal.
unsigned BitWidth = N1.getValueSizeInBits();
if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
- BitWidth - EVT.getSizeInBits())) &&
+ BitWidth - MemVT.getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
- TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
LN0->getChain(), LN0->getBasePtr(),
LN0->getSrcValue(),
- LN0->getSrcValueOffset(), EVT,
+ LN0->getSrcValueOffset(), MemVT,
LN0->isVolatile(), LN0->getAlignment());
AddToWorkList(N);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
@@ -1842,18 +1861,18 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- MVT EVT = LN0->getMemoryVT();
+ EVT MemVT = LN0->getMemoryVT();
// If we zero all the possible extended bits, then we can turn this into
// a zextload if we are running before legalize or the operation is legal.
unsigned BitWidth = N1.getValueSizeInBits();
if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
- BitWidth - EVT.getSizeInBits())) &&
+ BitWidth - MemVT.getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
- TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(), EVT,
+ LN0->getSrcValueOffset(), MemVT,
LN0->isVolatile(), LN0->getAlignment());
AddToWorkList(N);
CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
@@ -1869,24 +1888,24 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
LN0->isUnindexed() && N0.hasOneUse() &&
// Do not change the width of a volatile load.
!LN0->isVolatile()) {
- MVT EVT = MVT::Other;
+ EVT ExtVT = MVT::Other;
uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue()))
- EVT = MVT::getIntegerVT(ActiveBits);
+ ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits);
- MVT LoadedVT = LN0->getMemoryVT();
+ EVT LoadedVT = LN0->getMemoryVT();
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
- if (EVT != MVT::Other && LoadedVT.bitsGT(EVT) && EVT.isRound() &&
- (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT))) {
- MVT PtrType = N0.getOperand(1).getValueType();
+ if (ExtVT != MVT::Other && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() &&
+ (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
+ EVT PtrType = N0.getOperand(1).getValueType();
// For big endian targets, we need to add an offset to the pointer to
// load the correct bytes. For little endian systems, we merely need to
// read fewer bytes from the same pointer.
- unsigned LVTStoreBytes = LoadedVT.getStoreSizeInBits()/8;
- unsigned EVTStoreBytes = EVT.getStoreSizeInBits()/8;
+ unsigned LVTStoreBytes = LoadedVT.getStoreSize();
+ unsigned EVTStoreBytes = ExtVT.getStoreSize();
unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
unsigned Alignment = LN0->getAlignment();
SDValue NewPtr = LN0->getBasePtr();
@@ -1901,7 +1920,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
SDValue Load =
DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), VT, LN0->getChain(),
NewPtr, LN0->getSrcValue(), LN0->getSrcValueOffset(),
- EVT, LN0->isVolatile(), Alignment);
+ ExtVT, LN0->isVolatile(), Alignment);
AddToWorkList(N);
CombineTo(N0.getNode(), Load, Load.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
@@ -1918,7 +1937,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
SDValue LL, LR, RL, RR, CC0, CC1;
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N1.getValueType();
+ EVT VT = N1.getValueType();
// fold vector ops
if (VT.isVector()) {
@@ -1928,7 +1947,7 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
// fold (or x, undef) -> -1
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
- return DAG.getConstant(~0ULL, VT);
+ return DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
// fold (or c1, c2) -> c1|c2
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C);
@@ -2058,7 +2077,7 @@ static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) {
// a rot[lr].
SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) {
// Must be a legal type. Expanded 'n promoted things won't work with rotates.
- MVT VT = LHS.getValueType();
+ EVT VT = LHS.getValueType();
if (!TLI.isTypeLegal(VT)) return 0;
// The target must have at least one rotate flavor.
@@ -2219,7 +2238,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
SDValue LHS, RHS, CC;
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
// fold vector ops
if (VT.isVector()) {
@@ -2258,8 +2277,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
if (!LegalOperations || TLI.isCondCodeLegal(NotCC, LHS.getValueType())) {
switch (N0.getOpcode()) {
default:
- assert(0 && "Unhandled SetCC Equivalent!");
- abort();
+ llvm_unreachable("Unhandled SetCC Equivalent!");
case ISD::SETCC:
return DAG.getSetCC(N->getDebugLoc(), VT, LHS, RHS, NotCC);
case ISD::SELECT_CC:
@@ -2388,7 +2406,7 @@ SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
!isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
return SDValue();
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// If this is a signed shift right, and the high bit is modified by the
// logical operation, do not perform the transformation. The highBitSet
@@ -2418,7 +2436,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getSizeInBits();
// fold (shl c1, c2) -> c1<<c2
@@ -2443,7 +2461,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
SDValue N101 = N1.getOperand(0).getOperand(1);
if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
- MVT TruncVT = N1.getValueType();
+ EVT TruncVT = N1.getValueType();
SDValue N100 = N1.getOperand(0).getOperand(0);
APInt TruncC = N101C->getAPIntValue();
TruncC.trunc(TruncVT.getSizeInBits());
@@ -2474,20 +2492,33 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
if (N1C && N0.getOpcode() == ISD::SRL &&
N0.getOperand(1).getOpcode() == ISD::Constant) {
uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
- uint64_t c2 = N1C->getZExtValue();
- SDValue Mask = DAG.getNode(ISD::AND, N0.getDebugLoc(), VT, N0.getOperand(0),
- DAG.getConstant(~0ULL << c1, VT));
- if (c2 > c1)
- return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, Mask,
- DAG.getConstant(c2-c1, N1.getValueType()));
- else
- return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, Mask,
- DAG.getConstant(c1-c2, N1.getValueType()));
+ if (c1 < VT.getSizeInBits()) {
+ uint64_t c2 = N1C->getZExtValue();
+ SDValue HiBitsMask =
+ DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
+ VT.getSizeInBits() - c1),
+ VT);
+ SDValue Mask = DAG.getNode(ISD::AND, N0.getDebugLoc(), VT,
+ N0.getOperand(0),
+ HiBitsMask);
+ if (c2 > c1)
+ return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, Mask,
+ DAG.getConstant(c2-c1, N1.getValueType()));
+ else
+ return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, Mask,
+ DAG.getConstant(c1-c2, N1.getValueType()));
+ }
}
// fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
- if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1))
+ if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
+ SDValue HiBitsMask =
+ DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
+ VT.getSizeInBits() -
+ N1C->getZExtValue()),
+ VT);
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
- DAG.getConstant(~0ULL << N1C->getZExtValue(), VT));
+ HiBitsMask);
+ }
return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
}
@@ -2497,7 +2528,7 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
// fold (sra c1, c2) -> (sra c1, c2)
if (N0C && N1C)
@@ -2518,7 +2549,7 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
// sext_inreg.
if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
unsigned LowBits = VT.getSizeInBits() - (unsigned)N1C->getZExtValue();
- MVT EVT = MVT::getIntegerVT(LowBits);
+ EVT EVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
if ((!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, EVT)))
return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
N0.getOperand(0), DAG.getValueType(EVT));
@@ -2545,8 +2576,8 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
if (N01C && N1C) {
// Determine what the truncate's result bitsize and type would be.
unsigned VTValSize = VT.getSizeInBits();
- MVT TruncVT =
- MVT::getIntegerVT(VTValSize - N1C->getZExtValue());
+ EVT TruncVT =
+ EVT::getIntegerVT(*DAG.getContext(), VTValSize - N1C->getZExtValue());
// Determine the residual right-shift amount.
signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
@@ -2576,7 +2607,7 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
SDValue N101 = N1.getOperand(0).getOperand(1);
if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
- MVT TruncVT = N1.getValueType();
+ EVT TruncVT = N1.getValueType();
SDValue N100 = N1.getOperand(0).getOperand(0);
APInt TruncC = N101C->getAPIntValue();
TruncC.trunc(TruncVT.getSizeInBits());
@@ -2607,7 +2638,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- MVT VT = N0.getValueType();
+ EVT VT = N0.getValueType();
unsigned OpSizeInBits = VT.getSizeInBits();
// fold (srl c1, c2) -> c1 >>u c2
@@ -2641,7 +2672,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
// fold (srl (anyextend x), c) -> (anyextend (srl x, c))
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
// Shifting in all undef bits?
- MVT SmallVT = N0.getOperand(0).getValueType();
+ EVT SmallVT = N0.getOperand(0).getValueType();
if (N1C->getZExtValue() >= SmallVT.getSizeInBits())
return DAG.getUNDEF(VT);
@@ -2700,7 +2731,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
SDValue N101 = N1.getOperand(0).getOperand(1);
if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
- MVT TruncVT = N1.getValueType();
+ EVT TruncVT = N1.getValueType();
SDValue N100 = N1.getOperand(0).getOperand(0);
APInt TruncC = N101C->getAPIntValue();
TruncC.trunc(TruncVT.getSizeInBits());
@@ -2724,7 +2755,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
SDValue DAGCombiner::visitCTLZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (ctlz c1) -> c2
if (isa<ConstantSDNode>(N0))
@@ -2734,7 +2765,7 @@ SDValue DAGCombiner::visitCTLZ(SDNode *N) {
SDValue DAGCombiner::visitCTTZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (cttz c1) -> c2
if (isa<ConstantSDNode>(N0))
@@ -2744,7 +2775,7 @@ SDValue DAGCombiner::visitCTTZ(SDNode *N) {
SDValue DAGCombiner::visitCTPOP(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (ctpop c1) -> c2
if (isa<ConstantSDNode>(N0))
@@ -2759,8 +2790,8 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
- MVT VT = N->getValueType(0);
- MVT VT0 = N0.getValueType();
+ EVT VT = N->getValueType(0);
+ EVT VT0 = N0.getValueType();
// fold (select C, X, X) -> X
if (N1 == N2)
@@ -2825,7 +2856,8 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
// Check against MVT::Other for SELECT_CC, which is a workaround for targets
// having to say they don't support SELECT_CC on every type the DAG knows
// about, since there is no way to mark an opcode illegal at all value types
- if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other))
+ if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other) &&
+ TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))
return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT,
N0.getOperand(0), N0.getOperand(1),
N1, N2, N0.getOperand(2));
@@ -2945,7 +2977,7 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (sext c1) -> c1
if (isa<ConstantSDNode>(N0))
@@ -3054,13 +3086,13 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- MVT EVT = LN0->getMemoryVT();
+ EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
- TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT)) {
+ TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) {
SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(), EVT,
+ LN0->getSrcValueOffset(), MemVT,
LN0->isVolatile(), LN0->getAlignment());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(),
@@ -3071,14 +3103,34 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
}
}
- // sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc)
if (N0.getOpcode() == ISD::SETCC) {
+ // sext(setcc) -> sext_in_reg(vsetcc) for vectors.
+ if (VT.isVector() &&
+ // We know that the # elements of the results is the same as the
+ // # elements of the compare (and the # elements of the compare result
+ // for that matter). Check to see that they are the same size. If so,
+ // we know that the element size of the sext'd result matches the
+ // element size of the compare operands.
+ VT.getSizeInBits() == N0.getOperand(0).getValueType().getSizeInBits() &&
+
+ // Only do this before legalize for now.
+ !LegalOperations) {
+ return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ }
+
+ // sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc)
+ SDValue NegOne =
+ DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
SDValue SCC =
SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
- DAG.getConstant(~0ULL, VT), DAG.getConstant(0, VT),
+ NegOne, DAG.getConstant(0, VT),
cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
if (SCC.getNode()) return SCC;
}
+
+
// fold (sext x) -> (zext x) if the sign bit is known zero.
if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
@@ -3090,7 +3142,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (zext c1) -> c1
if (isa<ConstantSDNode>(N0))
@@ -3194,13 +3246,13 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- MVT EVT = LN0->getMemoryVT();
+ EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
- TLI.isLoadExtLegal(ISD::ZEXTLOAD, EVT)) {
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
- LN0->getSrcValueOffset(), EVT,
+ LN0->getSrcValueOffset(), MemVT,
LN0->isVolatile(), LN0->getAlignment());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(),
@@ -3225,7 +3277,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (aext c1) -> c1
if (isa<ConstantSDNode>(N0))
@@ -3330,11 +3382,11 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
!ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- MVT EVT = LN0->getMemoryVT();
+ EVT MemVT = LN0->getMemoryVT();
SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), N->getDebugLoc(),
VT, LN0->getChain(), LN0->getBasePtr(),
LN0->getSrcValue(),
- LN0->getSrcValueOffset(), EVT,
+ LN0->getSrcValueOffset(), MemVT,
LN0->isVolatile(), LN0->getAlignment());
CombineTo(N, ExtLoad);
CombineTo(N0.getNode(),
@@ -3400,8 +3452,8 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
unsigned Opc = N->getOpcode();
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
- MVT EVT = VT;
+ EVT VT = N->getValueType(0);
+ EVT ExtVT = VT;
// This transformation isn't valid for vector loads.
if (VT.isVector())
@@ -3411,20 +3463,21 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// extended to VT.
if (Opc == ISD::SIGN_EXTEND_INREG) {
ExtType = ISD::SEXTLOAD;
- EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
- if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))
+ ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+ if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, ExtVT))
return SDValue();
}
- unsigned EVTBits = EVT.getSizeInBits();
+ unsigned EVTBits = ExtVT.getSizeInBits();
unsigned ShAmt = 0;
- if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
+ if (N0.getOpcode() == ISD::SRL && N0.hasOneUse() && ExtVT.isRound()) {
if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
ShAmt = N01->getZExtValue();
// Is the shift amount a multiple of size of VT?
if ((ShAmt & (EVTBits-1)) == 0) {
N0 = N0.getOperand(0);
- if (N0.getValueType().getSizeInBits() <= EVTBits)
+ // Is the load width a multiple of size of VT?
+ if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
return SDValue();
}
}
@@ -3432,18 +3485,18 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
- if (isa<LoadSDNode>(N0) && N0.hasOneUse() && EVT.isRound() &&
+ if (isa<LoadSDNode>(N0) && N0.hasOneUse() && ExtVT.isRound() &&
cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() > EVTBits &&
// Do not change the width of a volatile load.
!cast<LoadSDNode>(N0)->isVolatile()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- MVT PtrType = N0.getOperand(1).getValueType();
+ EVT PtrType = N0.getOperand(1).getValueType();
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
if (TLI.isBigEndian()) {
unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
- unsigned EVTStoreBits = EVT.getStoreSizeInBits();
+ unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
}
@@ -3460,7 +3513,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
LN0->isVolatile(), NewAlign)
: DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(), NewPtr,
LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
- EVT, LN0->isVolatile(), NewAlign);
+ ExtVT, LN0->isVolatile(), NewAlign);
// Replace the old load's chain with the new load's chain.
WorkListRemover DeadNodes(*this);
@@ -3477,8 +3530,8 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- MVT VT = N->getValueType(0);
- MVT EVT = cast<VTSDNode>(N1)->getVT();
+ EVT VT = N->getValueType(0);
+ EVT EVT = cast<VTSDNode>(N1)->getVT();
unsigned VTBits = VT.getSizeInBits();
unsigned EVTBits = EVT.getSizeInBits();
@@ -3573,7 +3626,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// noop truncate
if (N0.getValueType() == N->getValueType(0))
@@ -3623,14 +3676,14 @@ static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
/// CombineConsecutiveLoads - build_pair (load, load) -> load
/// if load locations are consecutive.
-SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) {
+SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
assert(N->getOpcode() == ISD::BUILD_PAIR);
LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse())
return SDValue();
- MVT LD1VT = LD1->getValueType(0);
+ EVT LD1VT = LD1->getValueType(0);
const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
if (ISD::isNON_EXTLoad(LD2) &&
@@ -3642,7 +3695,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) {
TLI.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1, MFI)) {
unsigned Align = LD1->getAlignment();
unsigned NewAlign = TLI.getTargetData()->
- getABITypeAlignment(VT.getTypeForMVT());
+ getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
if (NewAlign <= Align &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
@@ -3656,7 +3709,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) {
SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// If the input is a BUILD_VECTOR with all constant elements, fold this now.
// Only do this before legalize, since afterward the target may be depending
@@ -3674,7 +3727,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
break;
}
- MVT DestEltVT = N->getValueType(0).getVectorElementType();
+ EVT DestEltVT = N->getValueType(0).getVectorElementType();
assert(!DestEltVT.isVector() &&
"Element type of vector ValueType must not be vector!");
if (isSimple)
@@ -3684,7 +3737,18 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
// If the input is a constant, let getNode fold it.
if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
SDValue Res = DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, N0);
- if (Res.getNode() != N) return Res;
+ if (Res.getNode() != N) {
+ if (!LegalOperations ||
+ TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
+ return Res;
+
+ // Folding it resulted in an illegal node, and it's too late to
+ // do that. Clean up the old node and forego the transformation.
+ // Ideally this won't happen very often, because instcombine
+ // and the earlier dagcombine runs (where illegal nodes are
+ // permitted) should have folded most of them already.
+ DAG.DeleteNode(Res.getNode());
+ }
}
// (conv (conv x, t1), t2) -> (conv x, t2)
@@ -3700,7 +3764,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
unsigned Align = TLI.getTargetData()->
- getABITypeAlignment(VT.getTypeForMVT());
+ getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
unsigned OrigAlign = LN0->getAlignment();
if (Align <= OrigAlign) {
@@ -3743,7 +3807,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
isa<ConstantFPSDNode>(N0.getOperand(0)) &&
VT.isInteger() && !VT.isVector()) {
unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
- MVT IntXVT = MVT::getIntegerVT(OrigXWidth);
+ EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
if (TLI.isTypeLegal(IntXVT) || !LegalTypes) {
SDValue X = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
IntXVT, N0.getOperand(1));
@@ -3791,7 +3855,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
}
SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
return CombineConsecutiveLoads(N, VT);
}
@@ -3799,8 +3863,8 @@ SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
/// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the
/// destination element value type.
SDValue DAGCombiner::
-ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
- MVT SrcEltVT = BV->getValueType(0).getVectorElementType();
+ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
+ EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
// If this is already the right type, we're done.
if (SrcEltVT == DstEltVT) return SDValue(BV, 0);
@@ -3822,7 +3886,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
DstEltVT, Op));
AddToWorkList(Ops.back().getNode());
}
- MVT VT = MVT::getVectorVT(DstEltVT,
+ EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
BV->getValueType(0).getVectorNumElements());
return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
&Ops[0], Ops.size());
@@ -3835,7 +3899,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
// Convert the input float vector to a int vector where the elements are the
// same sizes.
assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
- MVT IntVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits());
+ EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).getNode();
SrcEltVT = IntVT;
}
@@ -3844,7 +3908,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
// convert to integer first, then to FP of the right size.
if (DstEltVT.isFloatingPoint()) {
assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
- MVT TmpVT = MVT::getIntegerVT(DstEltVT.getSizeInBits());
+ EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).getNode();
// Next, convert to FP elements of the same size.
@@ -3880,7 +3944,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
}
- MVT VT = MVT::getVectorVT(DstEltVT, Ops.size());
+ EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
&Ops[0], Ops.size());
}
@@ -3889,7 +3953,8 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
// turns into multiple outputs.
bool isS2V = ISD::isScalarToVector(BV);
unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
- MVT VT = MVT::getVectorVT(DstEltVT, NumOutputsPerInput*BV->getNumOperands());
+ EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
+ NumOutputsPerInput*BV->getNumOperands());
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
@@ -3926,7 +3991,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold vector ops
if (VT.isVector()) {
@@ -3967,7 +4032,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold vector ops
if (VT.isVector()) {
@@ -4001,7 +4066,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold vector ops
if (VT.isVector()) {
@@ -4024,7 +4089,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
// fold (fmul X, 2.0) -> (fadd X, X)
if (N1CFP && N1CFP->isExactlyValue(+2.0))
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N0);
- // fold (fmul X, (fneg 1.0)) -> (fneg X)
+ // fold (fmul X, -1.0) -> (fneg X)
if (N1CFP && N1CFP->isExactlyValue(-1.0))
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N0);
@@ -4056,7 +4121,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold vector ops
if (VT.isVector()) {
@@ -4089,7 +4154,7 @@ SDValue DAGCombiner::visitFREM(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (frem c1, c2) -> fmod(c1,c2)
if (N0CFP && N1CFP && VT != MVT::ppcf128)
@@ -4103,7 +4168,7 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
if (N0CFP && N1CFP && VT != MVT::ppcf128) // Constant fold
return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1);
@@ -4151,8 +4216,8 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
SDValue N0 = N->getOperand(0);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- MVT VT = N->getValueType(0);
- MVT OpVT = N0.getValueType();
+ EVT VT = N->getValueType(0);
+ EVT OpVT = N0.getValueType();
// fold (sint_to_fp c1) -> c1fp
if (N0C && OpVT != MVT::ppcf128)
@@ -4173,8 +4238,8 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
SDValue N0 = N->getOperand(0);
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- MVT VT = N->getValueType(0);
- MVT OpVT = N0.getValueType();
+ EVT VT = N->getValueType(0);
+ EVT OpVT = N0.getValueType();
// fold (uint_to_fp c1) -> c1fp
if (N0C && OpVT != MVT::ppcf128)
@@ -4195,7 +4260,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
SDValue N0 = N->getOperand(0);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (fp_to_sint c1fp) -> c1
if (N0CFP)
@@ -4207,7 +4272,7 @@ SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {
SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
SDValue N0 = N->getOperand(0);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (fp_to_uint c1fp) -> c1
if (N0CFP && VT != MVT::ppcf128)
@@ -4220,7 +4285,7 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (fp_round c1fp) -> c1fp
if (N0CFP && N0.getValueType() != MVT::ppcf128)
@@ -4253,8 +4318,8 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
SDValue N0 = N->getOperand(0);
- MVT VT = N->getValueType(0);
- MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+ EVT VT = N->getValueType(0);
+ EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
// fold (fp_round_inreg c1fp) -> c1fp
@@ -4269,7 +4334,7 @@ SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
if (N->hasOneUse() &&
@@ -4326,7 +4391,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
N0.getOperand(0).getValueType().isInteger() &&
!N0.getOperand(0).getValueType().isVector()) {
SDValue Int = N0.getOperand(0);
- MVT IntVT = Int.getValueType();
+ EVT IntVT = Int.getValueType();
if (IntVT.isInteger() && !IntVT.isVector()) {
Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int,
DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
@@ -4342,7 +4407,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
SDValue DAGCombiner::visitFABS(SDNode *N) {
SDValue N0 = N->getOperand(0);
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// fold (fabs c1) -> fabs(c1)
if (N0CFP && VT != MVT::ppcf128)
@@ -4361,7 +4426,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
N0.getOperand(0).getValueType().isInteger() &&
!N0.getOperand(0).getValueType().isVector()) {
SDValue Int = N0.getOperand(0);
- MVT IntVT = Int.getValueType();
+ EVT IntVT = Int.getValueType();
if (IntVT.isInteger() && !IntVT.isVector()) {
Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
@@ -4419,7 +4484,6 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
if (Op0.getOpcode() == ISD::AND &&
Op0.hasOneUse() &&
Op1.getOpcode() == ISD::Constant) {
- SDValue AndOp0 = Op0.getOperand(0);
SDValue AndOp1 = Op0.getOperand(1);
if (AndOp1.getOpcode() == ISD::Constant) {
@@ -4491,7 +4555,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
bool isLoad = true;
SDValue Ptr;
- MVT VT;
+ EVT VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
if (LD->isIndexed())
return false;
@@ -4579,9 +4643,11 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
BasePtr, Offset, AM);
++PreIndexedNodes;
++NodesCombined;
- DOUT << "\nReplacing.4 "; DEBUG(N->dump(&DAG));
- DOUT << "\nWith: "; DEBUG(Result.getNode()->dump(&DAG));
- DOUT << '\n';
+ DEBUG(errs() << "\nReplacing.4 ";
+ N->dump(&DAG);
+ errs() << "\nWith: ";
+ Result.getNode()->dump(&DAG);
+ errs() << '\n');
WorkListRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
@@ -4616,7 +4682,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
bool isLoad = true;
SDValue Ptr;
- MVT VT;
+ EVT VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
if (LD->isIndexed())
return false;
@@ -4652,7 +4718,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
SDValue Offset;
ISD::MemIndexedMode AM = ISD::UNINDEXED;
if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
- if (Ptr == Offset)
+ if (Ptr == Offset && Op->getOpcode() == ISD::ADD)
std::swap(BasePtr, Offset);
if (Ptr != BasePtr)
continue;
@@ -4711,9 +4777,11 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
BasePtr, Offset, AM);
++PostIndexedNodes;
++NodesCombined;
- DOUT << "\nReplacing.5 "; DEBUG(N->dump(&DAG));
- DOUT << "\nWith: "; DEBUG(Result.getNode()->dump(&DAG));
- DOUT << '\n';
+ DEBUG(errs() << "\nReplacing.5 ";
+ N->dump(&DAG);
+ errs() << "\nWith: ";
+ Result.getNode()->dump(&DAG);
+ errs() << '\n');
WorkListRemover DeadNodes(*this);
if (isLoad) {
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0),
@@ -4815,9 +4883,11 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
// v3 = add v2, c
// Now we replace use of chain2 with chain1. This makes the second load
// isomorphic to the one we are deleting, and thus makes this load live.
- DOUT << "\nReplacing.6 "; DEBUG(N->dump(&DAG));
- DOUT << "\nWith chain: "; DEBUG(Chain.getNode()->dump(&DAG));
- DOUT << "\n";
+ DEBUG(errs() << "\nReplacing.6 ";
+ N->dump(&DAG);
+ errs() << "\nWith chain: ";
+ Chain.getNode()->dump(&DAG);
+ errs() << "\n");
WorkListRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain, &DeadNodes);
@@ -4833,9 +4903,11 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
if (N->hasNUsesOfValue(0, 0) && N->hasNUsesOfValue(0, 1)) {
SDValue Undef = DAG.getUNDEF(N->getValueType(0));
- DOUT << "\nReplacing.6 "; DEBUG(N->dump(&DAG));
- DOUT << "\nWith: "; DEBUG(Undef.getNode()->dump(&DAG));
- DOUT << " and 2 other values\n";
+ DEBUG(errs() << "\nReplacing.6 ";
+ N->dump(&DAG);
+ errs() << "\nWith: ";
+ Undef.getNode()->dump(&DAG);
+ errs() << " and 2 other values\n");
WorkListRemover DeadNodes(*this);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef, &DeadNodes);
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1),
@@ -4890,7 +4962,10 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
// Create token factor to keep old chain connected.
SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
MVT::Other, Chain, ReplLoad.getValue(1));
-
+
+ // Make sure the new and old chains are cleaned up.
+ AddToWorkList(Token.getNode());
+
// Replace uses with load result and token factor. Don't add users
// to work list.
return CombineTo(N, ReplLoad.getValue(0), Token, false);
@@ -4917,7 +4992,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
SDValue Chain = ST->getChain();
SDValue Value = ST->getValue();
SDValue Ptr = ST->getBasePtr();
- MVT VT = Value.getValueType();
+ EVT VT = Value.getValueType();
if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
return SDValue();
@@ -4944,12 +5019,12 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
unsigned ShAmt = Imm.countTrailingZeros();
unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
unsigned NewBW = NextPowerOf2(MSB - ShAmt);
- MVT NewVT = MVT::getIntegerVT(NewBW);
+ EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
while (NewBW < BitWidth &&
!(TLI.isOperationLegalOrCustom(Opc, NewVT) &&
TLI.isNarrowingProfitable(VT, NewVT))) {
NewBW = NextPowerOf2(NewBW);
- NewVT = MVT::getIntegerVT(NewBW);
+ NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
}
if (NewBW >= BitWidth)
return SDValue();
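The MVT::getIntegerVT calls become EVT::getIntegerVT because extended (non-simple) value types are now uniqued in an LLVMContext. A minimal sketch of the resulting pattern, assuming a SelectionDAG &DAG and TargetLowering &TLI in scope as in this function, with an invented 24-bit width:

// Build a possibly non-simple integer type and query the ABI alignment of
// the corresponding IR type.
EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), 24);
const Type *NewTy = NewVT.getTypeForEVT(*DAG.getContext());
unsigned NewAlign = TLI.getTargetData()->getABITypeAlignment(NewTy);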
@@ -4971,7 +5046,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
if (NewAlign <
- TLI.getTargetData()->getABITypeAlignment(NewVT.getTypeForMVT()))
+ TLI.getTargetData()->getABITypeAlignment(NewVT.getTypeForEVT(*DAG.getContext())))
return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
@@ -5024,9 +5099,9 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
ST->isUnindexed()) {
unsigned OrigAlign = ST->getAlignment();
- MVT SVT = Value.getOperand(0).getValueType();
+ EVT SVT = Value.getOperand(0).getValueType();
unsigned Align = TLI.getTargetData()->
- getABITypeAlignment(SVT.getTypeForMVT());
+ getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
if (Align <= OrigAlign &&
((!LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
@@ -5043,8 +5118,8 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
// transform should not be done in this case.
if (Value.getOpcode() != ISD::TargetConstantFP) {
SDValue Tmp;
- switch (CFP->getValueType(0).getSimpleVT()) {
- default: assert(0 && "Unknown FP type");
+ switch (CFP->getValueType(0).getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unknown FP type");
case MVT::f80: // We don't do this for these yet.
case MVT::f128:
case MVT::ppcf128:
@@ -5111,8 +5186,9 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
// If there is a better chain.
if (Chain != BetterChain) {
- // Replace the chain to avoid dependency.
SDValue ReplStore;
+
+ // Replace the chain to avoid dependency.
if (ST->isTruncatingStore()) {
ReplStore = DAG.getTruncStore(BetterChain, N->getDebugLoc(), Value, Ptr,
ST->getSrcValue(),ST->getSrcValueOffset(),
@@ -5128,6 +5204,9 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
MVT::Other, Chain, ReplStore);
+ // Make sure the new and old chains are cleaned up.
+ AddToWorkList(Token.getNode());
+
// Don't add users to work list.
return CombineTo(N, Token, false);
}
@@ -5211,10 +5290,10 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
// BUILD_VECTOR with undef elements and the inserted element.
if (!LegalOperations && InVec.getOpcode() == ISD::UNDEF &&
isa<ConstantSDNode>(EltNo)) {
- MVT VT = InVec.getValueType();
- MVT EVT = VT.getVectorElementType();
+ EVT VT = InVec.getValueType();
+ EVT EltVT = VT.getVectorElementType();
unsigned NElts = VT.getVectorNumElements();
- SmallVector<SDValue, 8> Ops(NElts, DAG.getUNDEF(EVT));
+ SmallVector<SDValue, 8> Ops(NElts, DAG.getUNDEF(EltVT));
unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
if (Elt < Ops.size())
@@ -5232,7 +5311,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
// If the operand is wider than the vector element type then it is implicitly
// truncated. Make that explicit here.
- MVT EltVT = InVec.getValueType().getVectorElementType();
+ EVT EltVT = InVec.getValueType().getVectorElementType();
SDValue InOp = InVec.getOperand(0);
if (InOp.getValueType() != EltVT)
return DAG.getNode(ISD::TRUNCATE, InVec.getDebugLoc(), EltVT, InOp);
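A quick worked example of the implicit truncation being made explicit (types are illustrative): for (scalar_to_vector i32 %x) viewed as v4i8, the element type is i8 while the operand is i32, so the extract is rewritten instead of relying on the implicit narrowing of SCALAR_TO_VECTOR.

// extract_vector_elt (scalar_to_vector i32 %x):v4i8, 0
//   ->  truncate i32 %x to i8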
@@ -5252,18 +5331,18 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
bool NewLoad = false;
bool BCNumEltsChanged = false;
- MVT VT = InVec.getValueType();
- MVT EVT = VT.getVectorElementType();
- MVT LVT = EVT;
+ EVT VT = InVec.getValueType();
+ EVT ExtVT = VT.getVectorElementType();
+ EVT LVT = ExtVT;
if (InVec.getOpcode() == ISD::BIT_CONVERT) {
- MVT BCVT = InVec.getOperand(0).getValueType();
- if (!BCVT.isVector() || EVT.bitsGT(BCVT.getVectorElementType()))
+ EVT BCVT = InVec.getOperand(0).getValueType();
+ if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
return SDValue();
if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
BCNumEltsChanged = true;
InVec = InVec.getOperand(0);
- EVT = BCVT.getVectorElementType();
+ ExtVT = BCVT.getVectorElementType();
NewLoad = true;
}
@@ -5272,7 +5351,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
if (ISD::isNormalLoad(InVec.getNode())) {
LN0 = cast<LoadSDNode>(InVec);
} else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
- InVec.getOperand(0).getValueType() == EVT &&
+ InVec.getOperand(0).getValueType() == ExtVT &&
ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
LN0 = cast<LoadSDNode>(InVec.getOperand(0));
} else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
@@ -5306,7 +5385,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// Check the resultant load doesn't need a higher alignment than the
// original load.
unsigned NewAlign =
- TLI.getTargetData()->getABITypeAlignment(LVT.getTypeForMVT());
+ TLI.getTargetData()->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
return SDValue();
@@ -5317,7 +5396,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
SDValue NewPtr = LN0->getBasePtr();
if (Elt) {
unsigned PtrOff = LVT.getSizeInBits() * Elt / 8;
- MVT PtrType = NewPtr.getValueType();
+ EVT PtrType = NewPtr.getValueType();
if (TLI.isBigEndian())
PtrOff = VT.getSizeInBits() / 8 - PtrOff;
NewPtr = DAG.getNode(ISD::ADD, N->getDebugLoc(), PtrType, NewPtr,
@@ -5334,8 +5413,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
unsigned NumInScalars = N->getNumOperands();
- MVT VT = N->getValueType(0);
- MVT EltType = VT.getVectorElementType();
+ EVT VT = N->getValueType(0);
// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
// operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
@@ -5432,11 +5510,10 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
return SDValue();
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
assert(N0.getValueType().getVectorNumElements() == NumElts &&
"Vector shuffle must be normalized in DAG");
@@ -5494,7 +5571,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
/// vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
@@ -5517,14 +5594,14 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
}
// Let's see if the target supports this vector_shuffle.
- MVT RVT = RHS.getValueType();
+ EVT RVT = RHS.getValueType();
if (!TLI.isVectorClearMaskLegal(Indices, RVT))
return SDValue();
// Return the new VECTOR_SHUFFLE node.
- MVT EVT = RVT.getVectorElementType();
+ EVT EltVT = RVT.getVectorElementType();
SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(),
- DAG.getConstant(0, EVT));
+ DAG.getConstant(0, EltVT));
SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
RVT, &ZeroOps[0], ZeroOps.size());
LHS = DAG.getNode(ISD::BIT_CONVERT, dl, RVT, LHS);
@@ -5543,10 +5620,10 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
// things. Simplifying them may result in a loss of legality.
if (LegalOperations) return SDValue();
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
assert(VT.isVector() && "SimplifyVBinOp only works on vectors!");
- MVT EltType = VT.getVectorElementType();
+ EVT EltType = VT.getVectorElementType();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
SDValue Shuffle = XformToShuffleWithZero(N);
@@ -5589,7 +5666,7 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
}
if (Ops.size() == LHS.getNumOperands()) {
- MVT VT = LHS.getValueType();
+ EVT VT = LHS.getValueType();
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
&Ops[0], Ops.size());
}
@@ -5728,7 +5805,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
// (x ? y : y) -> y.
if (N2 == N3) return N2;
- MVT VT = N2.getValueType();
+ EVT VT = N2.getValueType();
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());
@@ -5820,8 +5897,8 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
N2.getValueType().isInteger() &&
(N1C->isNullValue() || // (a < 0) ? b : 0
(N1C->getAPIntValue() == 1 && N0 == N2))) { // (a < 1) ? a : 0
- MVT XType = N0.getValueType();
- MVT AType = N2.getValueType();
+ EVT XType = N0.getValueType();
+ EVT AType = N2.getValueType();
if (XType.bitsGE(AType)) {
// and (sra X, size(X)-1, A) -> "and (srl X, C2), A" iff A is a
// single-bit constant.
@@ -5900,7 +5977,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
// FIXME: Turn all of these into setcc if setcc is legal;
// otherwise, go ahead with the folds.
if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
- MVT XType = N0.getValueType();
+ EVT XType = N0.getValueType();
if (!LegalOperations ||
TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(XType))) {
SDValue Res = DAG.getSetCC(DL, TLI.getSetCCResultType(XType), N0, N1, CC);
@@ -5942,7 +6019,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) &&
N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) &&
N2.getOperand(0) == N1 && N0.getValueType().isInteger()) {
- MVT XType = N0.getValueType();
+ EVT XType = N0.getValueType();
SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType, N0,
DAG.getConstant(XType.getSizeInBits()-1,
getShiftAmountTy()));
@@ -5957,7 +6034,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT &&
N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) {
if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) {
- MVT XType = N0.getValueType();
+ EVT XType = N0.getValueType();
if (SubC->isNullValue() && XType.isInteger()) {
SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
N0,
@@ -5976,11 +6053,11 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
}
/// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC.
-SDValue DAGCombiner::SimplifySetCC(MVT VT, SDValue N0,
+SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0,
SDValue N1, ISD::CondCode Cond,
DebugLoc DL, bool foldBooleans) {
TargetLowering::DAGCombinerInfo
- DagCombineInfo(DAG, Level == Unrestricted, false, this);
+ DagCombineInfo(DAG, !LegalTypes, !LegalOperations, false, this);
return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
}
@@ -6012,11 +6089,12 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
return S;
}
-/// FindBaseOffset - Return true if base is known not to alias with anything
-/// but itself. Provides base object and offset as results.
-static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset) {
+/// FindBaseOffset - Return true if base is a frame index, which is known not
+/// to alias with anything but itself. Provides base object and offset as results.
+static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
+ GlobalValue *&GV, void *&CV) {
// Assume it is a primitive operation.
- Base = Ptr; Offset = 0;
+ Base = Ptr; Offset = 0; GV = 0; CV = 0;
// If it's adding a simple constant then integrate the offset.
if (Base.getOpcode() == ISD::ADD) {
@@ -6025,36 +6103,73 @@ static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset) {
Offset += C->getZExtValue();
}
}
+
+ // Return the underlying GlobalValue, and update the Offset. Return false
+ // for GlobalAddressSDNode since the same GlobalAddress may be represented
+ // by multiple nodes with different offsets.
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
+ GV = G->getGlobal();
+ Offset += G->getOffset();
+ return false;
+ }
+ // Return the underlying Constant value, and update the Offset. Return false
+ // for ConstantSDNodes since the same constant pool entry may be represented
+ // by multiple nodes with different offsets.
+ if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
+ CV = C->isMachineConstantPoolEntry() ? (void *)C->getMachineCPVal()
+ : (void *)C->getConstVal();
+ Offset += C->getOffset();
+ return false;
+ }
// If it's any of the following then it can't alias with anything but itself.
- return isa<FrameIndexSDNode>(Base) ||
- isa<ConstantPoolSDNode>(Base) ||
- isa<GlobalAddressSDNode>(Base);
+ return isa<FrameIndexSDNode>(Base);
}
/// isAlias - Return true if there is any possibility that the two addresses
/// overlap.
bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
const Value *SrcValue1, int SrcValueOffset1,
+ unsigned SrcValueAlign1,
SDValue Ptr2, int64_t Size2,
- const Value *SrcValue2, int SrcValueOffset2) const {
+ const Value *SrcValue2, int SrcValueOffset2,
+ unsigned SrcValueAlign2) const {
// If they are the same then they must be aliases.
if (Ptr1 == Ptr2) return true;
// Gather base node and offset information.
SDValue Base1, Base2;
int64_t Offset1, Offset2;
- bool KnownBase1 = FindBaseOffset(Ptr1, Base1, Offset1);
- bool KnownBase2 = FindBaseOffset(Ptr2, Base2, Offset2);
+ GlobalValue *GV1, *GV2;
+ void *CV1, *CV2;
+ bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1);
+ bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2);
- // If they have a same base address then...
- if (Base1 == Base2)
- // Check to see if the addresses overlap.
+ // If they have a same base address then check to see if they overlap.
+ if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
- // If we know both bases then they can't alias.
- if (KnownBase1 && KnownBase2) return false;
+ // If we know what the bases are, and they aren't identical, then we know they
+ // cannot alias.
+ if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
+ return false;
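A worked example of the new base disambiguation (globals, offsets, and sizes are invented): two 4-byte loads of the same global through distinct GlobalAddressSDNode nodes no longer hit the conservative path.

// load (@g + 0), 4 bytes   vs   load (@g + 8), 4 bytes
//   Base1 != Base2, but GV1 == GV2, so the offset test applies:
//   (0 + 4) <= 8  ->  no alias.
// Two different known bases (say a frame index and a constant-pool entry)
//   are rejected outright by the check above.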
+ // If we know that SrcValue1 and SrcValue2 have relatively large alignment
+ // compared to the size and offset of the access, we may be able to prove they
+ // do not alias. This check is conservative for now to catch cases created by
+ // splitting vector types.
+ if ((SrcValueAlign1 == SrcValueAlign2) &&
+ (SrcValueOffset1 != SrcValueOffset2) &&
+ (Size1 == Size2) && (SrcValueAlign1 > Size1)) {
+ int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;
+ int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;
+
+ // There is no overlap between these relatively aligned accesses of similar
+ // size, so return no alias.
+ if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1)
+ return false;
+ }
+
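The alignment-based check is easiest to follow with concrete numbers; the sketch below is illustrative only, with variable names mirroring the ones above and values modelling two 4-byte halves of a split, 16-byte-aligned access:

#include <cstdint>

// Two 4-byte accesses at source-value offsets 0 and 4, both tagged with
// alignment 16: their offsets modulo the alignment cannot overlap.
bool demoNoAlias() {
  int64_t Size1 = 4, Size2 = 4;
  int SrcValueOffset1 = 0, SrcValueOffset2 = 4;
  unsigned SrcValueAlign1 = 16;
  int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;  // 0
  int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;  // 4
  return (OffAlign1 + Size1) <= OffAlign2 ||             // 4 <= 4 -> true
         (OffAlign2 + Size2) <= OffAlign1;
}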
if (CombinerGlobalAA) {
// Use alias analysis information.
int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
@@ -6074,20 +6189,24 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
/// node. Returns true if the operand was a load.
bool DAGCombiner::FindAliasInfo(SDNode *N,
SDValue &Ptr, int64_t &Size,
- const Value *&SrcValue, int &SrcValueOffset) const {
+ const Value *&SrcValue,
+ int &SrcValueOffset,
+ unsigned &SrcValueAlign) const {
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
Ptr = LD->getBasePtr();
Size = LD->getMemoryVT().getSizeInBits() >> 3;
SrcValue = LD->getSrcValue();
SrcValueOffset = LD->getSrcValueOffset();
+ SrcValueAlign = LD->getOriginalAlignment();
return true;
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
Ptr = ST->getBasePtr();
Size = ST->getMemoryVT().getSizeInBits() >> 3;
SrcValue = ST->getSrcValue();
SrcValueOffset = ST->getSrcValueOffset();
+ SrcValueAlign = ST->getOriginalAlignment();
} else {
- assert(0 && "FindAliasInfo expected a memory operand");
+ llvm_unreachable("FindAliasInfo expected a memory operand");
}
return false;
@@ -6098,28 +6217,45 @@ bool DAGCombiner::FindAliasInfo(SDNode *N,
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
SmallVector<SDValue, 8> &Aliases) {
SmallVector<SDValue, 8> Chains; // List of chains to visit.
- std::set<SDNode *> Visited; // Visited node set.
+ SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
// Get alias information for node.
SDValue Ptr;
- int64_t Size = 0;
- const Value *SrcValue = 0;
- int SrcValueOffset = 0;
- bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset);
+ int64_t Size;
+ const Value *SrcValue;
+ int SrcValueOffset;
+ unsigned SrcValueAlign;
+ bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset,
+ SrcValueAlign);
// Starting off.
Chains.push_back(OriginalChain);
-
+ unsigned Depth = 0;
+
// Look at each chain and determine if it is an alias. If so, add it to the
// aliases list. If not, then continue up the chain looking for the next
// candidate.
while (!Chains.empty()) {
SDValue Chain = Chains.back();
Chains.pop_back();
+
+ // For TokenFactor nodes, look at each operand and only continue up the
+ // chain until we find two aliases. If we've seen two aliases, assume we'll
+ // find more and revert to original chain since the xform is unlikely to be
+ // profitable.
+ //
+ // FIXME: The depth check could be made to return the last non-aliasing
+ // chain we found before we hit a tokenfactor rather than the original
+ // chain.
+ if (Depth > 6 || Aliases.size() == 2) {
+ Aliases.clear();
+ Aliases.push_back(OriginalChain);
+ break;
+ }
- // Don't bother if we've been before.
- if (Visited.find(Chain.getNode()) != Visited.end()) continue;
- Visited.insert(Chain.getNode());
+ // Don't bother if we've been before.
+ if (!Visited.insert(Chain.getNode()))
+ continue;
switch (Chain.getOpcode()) {
case ISD::EntryToken:
@@ -6130,35 +6266,40 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
case ISD::STORE: {
// Get alias information for Chain.
SDValue OpPtr;
- int64_t OpSize = 0;
- const Value *OpSrcValue = 0;
- int OpSrcValueOffset = 0;
+ int64_t OpSize;
+ const Value *OpSrcValue;
+ int OpSrcValueOffset;
+ unsigned OpSrcValueAlign;
bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize,
- OpSrcValue, OpSrcValueOffset);
+ OpSrcValue, OpSrcValueOffset,
+ OpSrcValueAlign);
// If chain is alias then stop here.
if (!(IsLoad && IsOpLoad) &&
- isAlias(Ptr, Size, SrcValue, SrcValueOffset,
- OpPtr, OpSize, OpSrcValue, OpSrcValueOffset)) {
+ isAlias(Ptr, Size, SrcValue, SrcValueOffset, SrcValueAlign,
+ OpPtr, OpSize, OpSrcValue, OpSrcValueOffset,
+ OpSrcValueAlign)) {
Aliases.push_back(Chain);
} else {
// Look further up the chain.
Chains.push_back(Chain.getOperand(0));
- // Clean up old chain.
- AddToWorkList(Chain.getNode());
+ ++Depth;
}
break;
}
case ISD::TokenFactor:
- // We have to check each of the operands of the token factor, so we queue
- // then up. Adding the operands to the queue (stack) in reverse order
- // maintains the original order and increases the likelihood that getNode
- // will find a matching token factor (CSE.)
+ // We have to check each of the operands of the token factor for "small"
+ // token factors, so we queue them up. Adding the operands to the queue
+ // (stack) in reverse order maintains the original order and increases the
+ // likelihood that getNode will find a matching token factor (CSE.)
+ if (Chain.getNumOperands() > 16) {
+ Aliases.push_back(Chain);
+ break;
+ }
for (unsigned n = Chain.getNumOperands(); n;)
Chains.push_back(Chain.getOperand(--n));
- // Eliminate the token factor if we can.
- AddToWorkList(Chain.getNode());
+ ++Depth;
break;
default:
@@ -6184,15 +6325,10 @@ SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
// If a single operand then chain to it. We don't need to revisit it.
return Aliases[0];
}
-
+
// Construct a custom tailored token factor.
- SDValue NewChain = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
- &Aliases[0], Aliases.size());
-
- // Make sure the old chain gets cleaned up.
- if (NewChain != OldChain) AddToWorkList(OldChain.getNode());
-
- return NewChain;
+ return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
+ &Aliases[0], Aliases.size());
}
// SelectionDAG::Combine - This is the entry point for the file.
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index cd2d5ac..8e955af 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -57,7 +57,7 @@
using namespace llvm;
unsigned FastISel::getRegForValue(Value *V) {
- MVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
+ EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
// Don't handle non-simple values in FastISel.
if (!RealVT.isSimple())
return 0;
@@ -65,11 +65,11 @@ unsigned FastISel::getRegForValue(Value *V) {
// Ignore illegal types. We must do this before looking up the value
// in ValueMap because Arguments are given virtual registers regardless
// of whether FastISel can handle them.
- MVT::SimpleValueType VT = RealVT.getSimpleVT();
+ MVT VT = RealVT.getSimpleVT();
if (!TLI.isTypeLegal(VT)) {
// Promote MVT::i1 to a legal type though, because it's common and easy.
if (VT == MVT::i1)
- VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
+ VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
else
return 0;
}
@@ -92,13 +92,14 @@ unsigned FastISel::getRegForValue(Value *V) {
} else if (isa<ConstantPointerNull>(V)) {
// Translate this as an integer zero so that it can be
// local-CSE'd with actual integer zeros.
- Reg = getRegForValue(Constant::getNullValue(TD.getIntPtrType()));
+ Reg =
+ getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
if (!Reg) {
const APFloat &Flt = CF->getValueAPF();
- MVT IntVT = TLI.getPointerTy();
+ EVT IntVT = TLI.getPointerTy();
uint64_t x[2];
uint32_t IntBitWidth = IntVT.getSizeInBits();
@@ -108,7 +109,8 @@ unsigned FastISel::getRegForValue(Value *V) {
if (isExact) {
APInt IntVal(IntBitWidth, 2, x);
- unsigned IntegerReg = getRegForValue(ConstantInt::get(IntVal));
+ unsigned IntegerReg =
+ getRegForValue(ConstantInt::get(V->getContext(), IntVal));
if (IntegerReg != 0)
Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
}
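A worked illustration of this fallback path (the constants are invented): for 42.0 the APFloat-to-integer conversion is exact, so FastISel materializes a pointer-width ConstantInt 42 through getRegForValue and rebuilds the floating-point value with ISD::SINT_TO_FP; for 0.5 the conversion is inexact, isExact stays false, and Reg remains 0 so the value is handled elsewhere.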
@@ -174,13 +176,11 @@ unsigned FastISel::getRegForGEPIndex(Value *Idx) {
// If the index is smaller or larger than intptr_t, truncate or extend it.
MVT PtrVT = TLI.getPointerTy();
- MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
+ EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
if (IdxVT.bitsLT(PtrVT))
- IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT.getSimpleVT(),
- ISD::SIGN_EXTEND, IdxN);
+ IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
else if (IdxVT.bitsGT(PtrVT))
- IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT.getSimpleVT(),
- ISD::TRUNCATE, IdxN);
+ IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
return IdxN;
}
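For orientation, the widths involved on a typical 64-bit target (PtrVT == i64; the index types are illustrative):

// i32 GEP index  -> FastEmit_r(MVT::i32, MVT::i64, ISD::SIGN_EXTEND, IdxN)
// i64 GEP index  -> used unchanged
// i128 GEP index -> FastEmit_r(MVT::i128, MVT::i64, ISD::TRUNCATE, IdxN)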
@@ -188,7 +188,7 @@ unsigned FastISel::getRegForGEPIndex(Value *Idx) {
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
- MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
+ EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
if (VT == MVT::Other || !VT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
@@ -203,7 +203,7 @@ bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
if (VT == MVT::i1 &&
(ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
ISDOpcode == ISD::XOR))
- VT = TLI.getTypeToTransformTo(VT);
+ VT = TLI.getTypeToTransformTo(I->getContext(), VT);
else
return false;
}
@@ -260,7 +260,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
return false;
const Type *Ty = I->getOperand(0)->getType();
- MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
+ MVT VT = TLI.getPointerTy();
for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
OI != E; ++OI) {
Value *Idx = *OI;
@@ -335,7 +335,7 @@ bool FastISel::SelectCall(User *I) {
if (isValidDebugInfoIntrinsic(*RSI, CodeGenOpt::None) && DW
&& DW->ShouldEmitDwarfDebug()) {
unsigned ID =
- DW->RecordRegionStart(cast<GlobalVariable>(RSI->getContext()));
+ DW->RecordRegionStart(RSI->getContext());
const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
BuildMI(MBB, DL, II).addImm(ID);
}
@@ -346,7 +346,7 @@ bool FastISel::SelectCall(User *I) {
if (isValidDebugInfoIntrinsic(*REI, CodeGenOpt::None) && DW
&& DW->ShouldEmitDwarfDebug()) {
unsigned ID = 0;
- DISubprogram Subprogram(cast<GlobalVariable>(REI->getContext()));
+ DISubprogram Subprogram(REI->getContext());
if (isInlinedFnEnd(*REI, MF.getFunction())) {
// This is end of an inlined function.
const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
@@ -359,7 +359,7 @@ bool FastISel::SelectCall(User *I) {
BuildMI(MBB, DL, II).addImm(ID);
} else {
const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
- ID = DW->RecordRegionEnd(cast<GlobalVariable>(REI->getContext()));
+ ID = DW->RecordRegionEnd(REI->getContext());
BuildMI(MBB, DL, II).addImm(ID);
}
}
@@ -384,11 +384,10 @@ bool FastISel::SelectCall(User *I) {
setCurDebugLoc(ExtractDebugLocation(*FSI, MF.getDebugLocInfo()));
DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
- DISubprogram SP(cast<GlobalVariable>(FSI->getSubprogram()));
- unsigned LabelID = DW->RecordInlinedFnStart(SP,
- DICompileUnit(PrevLocTpl.CompileUnit),
- PrevLocTpl.Line,
- PrevLocTpl.Col);
+ DISubprogram SP(FSI->getSubprogram());
+ unsigned LabelID =
+ DW->RecordInlinedFnStart(SP,DICompileUnit(PrevLocTpl.Scope),
+ PrevLocTpl.Line, PrevLocTpl.Col);
const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
BuildMI(MBB, DL, II).addImm(LabelID);
return true;
@@ -398,7 +397,7 @@ bool FastISel::SelectCall(User *I) {
MF.setDefaultDebugLoc(ExtractDebugLocation(*FSI, MF.getDebugLocInfo()));
// llvm.dbg.func_start also defines beginning of function scope.
- DW->RecordRegionStart(cast<GlobalVariable>(FSI->getSubprogram()));
+ DW->RecordRegionStart(FSI->getSubprogram());
return true;
}
case Intrinsic::dbg_declare: {
@@ -407,7 +406,6 @@ bool FastISel::SelectCall(User *I) {
|| !DW->ShouldEmitDwarfDebug())
return true;
- Value *Variable = DI->getVariable();
Value *Address = DI->getAddress();
if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
Address = BCI->getOperand(0);
@@ -418,20 +416,15 @@ bool FastISel::SelectCall(User *I) {
StaticAllocaMap.find(AI);
if (SI == StaticAllocaMap.end()) break; // VLAs.
int FI = SI->second;
-
- // Determine the debug globalvariable.
- GlobalValue *GV = cast<GlobalVariable>(Variable);
-
- // Build the DECLARE instruction.
- const TargetInstrDesc &II = TII.get(TargetInstrInfo::DECLARE);
- MachineInstr *DeclareMI
- = BuildMI(MBB, DL, II).addFrameIndex(FI).addGlobalAddress(GV);
- DIVariable DV(cast<GlobalVariable>(GV));
- DW->RecordVariableScope(DV, DeclareMI);
+ if (MMI)
+ MMI->setVariableDbgInfo(DI->getVariable(), FI);
+#ifndef ATTACH_DEBUG_INFO_TO_AN_INSN
+ DW->RecordVariable(DI->getVariable(), FI);
+#endif
return true;
}
case Intrinsic::eh_exception: {
- MVT VT = TLI.getValueType(I->getType());
+ EVT VT = TLI.getValueType(I->getType());
switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
default: break;
case TargetLowering::Expand: {
@@ -449,15 +442,11 @@ bool FastISel::SelectCall(User *I) {
}
break;
}
- case Intrinsic::eh_selector_i32:
- case Intrinsic::eh_selector_i64: {
- MVT VT = TLI.getValueType(I->getType());
+ case Intrinsic::eh_selector: {
+ EVT VT = TLI.getValueType(I->getType());
switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
default: break;
case TargetLowering::Expand: {
- MVT VT = (IID == Intrinsic::eh_selector_i32 ?
- MVT::i32 : MVT::i64);
-
if (MMI) {
if (MBB->isLandingPad())
AddCatchInfo(*cast<CallInst>(I), MMI, MBB);
@@ -471,12 +460,25 @@ bool FastISel::SelectCall(User *I) {
}
unsigned Reg = TLI.getExceptionSelectorRegister();
- const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
+ EVT SrcVT = TLI.getPointerTy();
+ const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- Reg, RC, RC);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
+ RC, RC);
assert(InsertedCopy && "Can't copy address registers!");
InsertedCopy = InsertedCopy;
+
+ // Cast the register to the type of the selector.
+ if (SrcVT.bitsGT(MVT::i32))
+ ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
+ ResultReg);
+ else if (SrcVT.bitsLT(MVT::i32))
+ ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
+ ISD::SIGN_EXTEND, ResultReg);
+ if (ResultReg == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+
UpdateValueMap(I, ResultReg);
} else {
unsigned ResultReg =
@@ -493,8 +495,8 @@ bool FastISel::SelectCall(User *I) {
}
bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
- MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
- MVT DstVT = TLI.getValueType(I->getType());
+ EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
DstVT == MVT::Other || !DstVT.isSimple())
@@ -524,14 +526,14 @@ bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
// If the operand is i1, arrange for the high bits in the register to be zero.
if (SrcVT == MVT::i1) {
- SrcVT = TLI.getTypeToTransformTo(SrcVT);
+ SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
if (!InputReg)
return false;
}
// If the result is i1, truncate to the target's type for i1 first.
if (DstVT == MVT::i1)
- DstVT = TLI.getTypeToTransformTo(DstVT);
+ DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);
unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
DstVT.getSimpleVT(),
@@ -555,8 +557,8 @@ bool FastISel::SelectBitCast(User *I) {
}
// Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
- MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
- MVT DstVT = TLI.getValueType(I->getType());
+ EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
DstVT == MVT::Other || !DstVT.isSimple() ||
@@ -616,6 +618,49 @@ FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
MBB->addSuccessor(MSucc);
}
+/// SelectFNeg - Emit an FNeg operation.
+///
+bool
+FastISel::SelectFNeg(User *I) {
+ unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
+ if (OpReg == 0) return false;
+
+ // If the target has ISD::FNEG, use it.
+ EVT VT = TLI.getValueType(I->getType());
+ unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
+ ISD::FNEG, OpReg);
+ if (ResultReg != 0) {
+ UpdateValueMap(I, ResultReg);
+ return true;
+ }
+
+ // Bitcast the value to integer, twiddle the sign bit with xor,
+ // and then bitcast it back to floating-point.
+ if (VT.getSizeInBits() > 64) return false;
+ EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
+ if (!TLI.isTypeLegal(IntVT))
+ return false;
+
+ unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
+ ISD::BIT_CONVERT, OpReg);
+ if (IntReg == 0)
+ return false;
+
+ unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR, IntReg,
+ UINT64_C(1) << (VT.getSizeInBits()-1),
+ IntVT.getSimpleVT());
+ if (IntResultReg == 0)
+ return false;
+
+ ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
+ ISD::BIT_CONVERT, IntResultReg);
+ if (ResultReg == 0)
+ return false;
+
+ UpdateValueMap(I, ResultReg);
+ return true;
+}
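The integer path in SelectFNeg is the classic sign-bit flip. A self-contained plain C++ illustration of the same sequence for f32 (the function name is invented; the memcpy calls stand in for BIT_CONVERT, and the XOR mask 1 << 31 matches UINT64_C(1) << (VT.getSizeInBits()-1) above):

#include <cstdint>
#include <cstring>

// Negate a float by flipping its sign bit in the integer domain.
float fnegViaXor(float X) {
  uint32_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));   // BIT_CONVERT f32 -> i32
  Bits ^= UINT32_C(1) << 31;              // XOR in the sign bit
  std::memcpy(&X, &Bits, sizeof(Bits));   // BIT_CONVERT i32 -> f32
  return X;
}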
+
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
switch (Opcode) {
@@ -626,6 +671,9 @@ FastISel::SelectOperator(User *I, unsigned Opcode) {
case Instruction::Sub:
return SelectBinaryOp(I, ISD::SUB);
case Instruction::FSub:
+ // FNeg is currently represented in LLVM IR as a special case of FSub.
+ if (BinaryOperator::isFNeg(I))
+ return SelectFNeg(I);
return SelectBinaryOp(I, ISD::FSUB);
case Instruction::Mul:
return SelectBinaryOp(I, ISD::MUL);
@@ -709,8 +757,8 @@ FastISel::SelectOperator(User *I, unsigned Opcode) {
case Instruction::IntToPtr: // Deliberate fall-through.
case Instruction::PtrToInt: {
- MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
- MVT DstVT = TLI.getValueType(I->getType());
+ EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+ EVT DstVT = TLI.getValueType(I->getType());
if (DstVT.bitsGT(SrcVT))
return SelectCast(I, ISD::ZERO_EXTEND);
if (DstVT.bitsLT(SrcVT))
@@ -758,45 +806,44 @@ FastISel::FastISel(MachineFunction &mf,
FastISel::~FastISel() {}
-unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
+unsigned FastISel::FastEmit_(MVT, MVT,
ISD::NodeType) {
return 0;
}
-unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
+unsigned FastISel::FastEmit_r(MVT, MVT,
ISD::NodeType, unsigned /*Op0*/) {
return 0;
}
-unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
+unsigned FastISel::FastEmit_rr(MVT, MVT,
ISD::NodeType, unsigned /*Op0*/,
unsigned /*Op1*/) {
return 0;
}
-unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
- ISD::NodeType, uint64_t /*Imm*/) {
+unsigned FastISel::FastEmit_i(MVT, MVT, ISD::NodeType, uint64_t /*Imm*/) {
return 0;
}
-unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
+unsigned FastISel::FastEmit_f(MVT, MVT,
ISD::NodeType, ConstantFP * /*FPImm*/) {
return 0;
}
-unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
+unsigned FastISel::FastEmit_ri(MVT, MVT,
ISD::NodeType, unsigned /*Op0*/,
uint64_t /*Imm*/) {
return 0;
}
-unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
+unsigned FastISel::FastEmit_rf(MVT, MVT,
ISD::NodeType, unsigned /*Op0*/,
ConstantFP * /*FPImm*/) {
return 0;
}
-unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
+unsigned FastISel::FastEmit_rri(MVT, MVT,
ISD::NodeType,
unsigned /*Op0*/, unsigned /*Op1*/,
uint64_t /*Imm*/) {
@@ -807,9 +854,9 @@ unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and try
/// FastEmit_rr instead.
-unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
+unsigned FastISel::FastEmit_ri_(MVT VT, ISD::NodeType Opcode,
unsigned Op0, uint64_t Imm,
- MVT::SimpleValueType ImmType) {
+ MVT ImmType) {
// First check if immediate type is legal. If not, we can't use the ri form.
unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
if (ResultReg != 0)
@@ -824,9 +871,9 @@ unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and try FastEmit_rr instead.
-unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
+unsigned FastISel::FastEmit_rf_(MVT VT, ISD::NodeType Opcode,
unsigned Op0, ConstantFP *FPImm,
- MVT::SimpleValueType ImmType) {
+ MVT ImmType) {
// First check if immediate type is legal. If not, we can't use the rf form.
unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
if (ResultReg != 0)
@@ -842,7 +889,7 @@ unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
// be replaced by code that creates a load from a constant-pool entry,
// which will require some target-specific work.
const APFloat &Flt = FPImm->getValueAPF();
- MVT IntVT = TLI.getPointerTy();
+ EVT IntVT = TLI.getPointerTy();
uint64_t x[2];
uint32_t IntBitWidth = IntVT.getSizeInBits();
@@ -987,7 +1034,7 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
return ResultReg;
}
-unsigned FastISel::FastEmitInst_extractsubreg(MVT::SimpleValueType RetVT,
+unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned Op0, uint32_t Idx) {
const TargetRegisterClass* RC = MRI.getRegClass(Op0);
@@ -1008,6 +1055,6 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT::SimpleValueType RetVT,
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
-unsigned FastISel::FastEmitZExtFromI1(MVT::SimpleValueType VT, unsigned Op) {
+unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) {
return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
}
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
new file mode 100644
index 0000000..d3ffb2a
--- /dev/null
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -0,0 +1,693 @@
+//==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements the Emit routines for the SelectionDAG class, which creates
+// MachineInstrs based on the decisions of the SelectionDAG instruction
+// selection.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "instr-emitter"
+#include "InstrEmitter.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+using namespace llvm;
+
+/// CountResults - The results of target nodes have register or immediate
+/// operands first, then an optional chain, and optional flag operands (which do
+/// not go into the resulting MachineInstr).
+unsigned InstrEmitter::CountResults(SDNode *Node) {
+ unsigned N = Node->getNumValues();
+ while (N && Node->getValueType(N - 1) == MVT::Flag)
+ --N;
+ if (N && Node->getValueType(N - 1) == MVT::Other)
+ --N; // Skip over chain result.
+ return N;
+}
+
+/// CountOperands - The inputs to target nodes have any actual inputs first,
+/// followed by an optional chain operand, then an optional flag operand.
+/// Compute the number of actual operands that will go into the resulting
+/// MachineInstr.
+unsigned InstrEmitter::CountOperands(SDNode *Node) {
+ unsigned N = Node->getNumOperands();
+ while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
+ --N;
+ if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
+ --N; // Ignore chain if it exists.
+ return N;
+}
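A worked example of these two helpers (the node shape is invented): for a target node whose result list is {i32, MVT::Other, MVT::Flag} and whose operand list is {i32, i32, MVT::Other, MVT::Flag}, the trailing flag is peeled off first and then the chain, so CountResults returns 1 and CountOperands returns 2.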
+
+/// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
+/// implicit physical register output.
+void InstrEmitter::
+EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
+ unsigned SrcReg, DenseMap<SDValue, unsigned> &VRBaseMap) {
+ unsigned VRBase = 0;
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ // Just use the input register directly!
+ SDValue Op(Node, ResNo);
+ if (IsClone)
+ VRBaseMap.erase(Op);
+ bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
+ isNew = isNew; // Silence compiler warning.
+ assert(isNew && "Node emitted out of order - early");
+ return;
+ }
+
+ // If the node is only used by a CopyToReg and the dest reg is a vreg, use
+ // the CopyToReg'd destination register instead of creating a new vreg.
+ bool MatchReg = true;
+ const TargetRegisterClass *UseRC = NULL;
+ if (!IsClone && !IsCloned)
+ for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
+ UI != E; ++UI) {
+ SDNode *User = *UI;
+ bool Match = true;
+ if (User->getOpcode() == ISD::CopyToReg &&
+ User->getOperand(2).getNode() == Node &&
+ User->getOperand(2).getResNo() == ResNo) {
+ unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
+ if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
+ VRBase = DestReg;
+ Match = false;
+ } else if (DestReg != SrcReg)
+ Match = false;
+ } else {
+ for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
+ SDValue Op = User->getOperand(i);
+ if (Op.getNode() != Node || Op.getResNo() != ResNo)
+ continue;
+ EVT VT = Node->getValueType(Op.getResNo());
+ if (VT == MVT::Other || VT == MVT::Flag)
+ continue;
+ Match = false;
+ if (User->isMachineOpcode()) {
+ const TargetInstrDesc &II = TII->get(User->getMachineOpcode());
+ const TargetRegisterClass *RC = 0;
+ if (i+II.getNumDefs() < II.getNumOperands())
+ RC = II.OpInfo[i+II.getNumDefs()].getRegClass(TRI);
+ if (!UseRC)
+ UseRC = RC;
+ else if (RC) {
+ const TargetRegisterClass *ComRC = getCommonSubClass(UseRC, RC);
+ // If multiple uses expect disjoint register classes, we emit
+ // copies in AddRegisterOperand.
+ if (ComRC)
+ UseRC = ComRC;
+ }
+ }
+ }
+ }
+ MatchReg &= Match;
+ if (VRBase)
+ break;
+ }
+
+ EVT VT = Node->getValueType(ResNo);
+ const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
+ SrcRC = TRI->getPhysicalRegisterRegClass(SrcReg, VT);
+
+ // Figure out the register class to create for the destreg.
+ if (VRBase) {
+ DstRC = MRI->getRegClass(VRBase);
+ } else if (UseRC) {
+ assert(UseRC->hasType(VT) && "Incompatible phys register def and uses!");
+ DstRC = UseRC;
+ } else {
+ DstRC = TLI->getRegClassFor(VT);
+ }
+
+ // If all uses are reading from the src physical register and copying the
+ // register is either impossible or very expensive, then don't create a copy.
+ if (MatchReg && SrcRC->getCopyCost() < 0) {
+ VRBase = SrcReg;
+ } else {
+ // Create the reg, emit the copy.
+ VRBase = MRI->createVirtualRegister(DstRC);
+ bool Emitted = TII->copyRegToReg(*MBB, InsertPos, VRBase, SrcReg,
+ DstRC, SrcRC);
+
+ assert(Emitted && "Unable to issue a copy instruction!\n");
+ (void) Emitted;
+ }
+
+ SDValue Op(Node, ResNo);
+ if (IsClone)
+ VRBaseMap.erase(Op);
+ bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
+ isNew = isNew; // Silence compiler warning.
+ assert(isNew && "Node emitted out of order - early");
+}
+
+/// getDstOfOnlyCopyToRegUse - If the only use of the specified result number of
+/// the node is a CopyToReg, return its destination register. Return 0 otherwise.
+unsigned InstrEmitter::getDstOfOnlyCopyToRegUse(SDNode *Node,
+ unsigned ResNo) const {
+ if (!Node->hasOneUse())
+ return 0;
+
+ SDNode *User = *Node->use_begin();
+ if (User->getOpcode() == ISD::CopyToReg &&
+ User->getOperand(2).getNode() == Node &&
+ User->getOperand(2).getResNo() == ResNo) {
+ unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ return Reg;
+ }
+ return 0;
+}
+
+void InstrEmitter::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
+ const TargetInstrDesc &II,
+ bool IsClone, bool IsCloned,
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
+ assert(Node->getMachineOpcode() != TargetInstrInfo::IMPLICIT_DEF &&
+ "IMPLICIT_DEF should have been handled as a special case elsewhere!");
+
+ for (unsigned i = 0; i < II.getNumDefs(); ++i) {
+ // If the specific node value is only used by a CopyToReg and the dest reg
+ // is a vreg in the same register class, use the CopyToReg'd destination
+ // register instead of creating a new vreg.
+ unsigned VRBase = 0;
+ const TargetRegisterClass *RC = II.OpInfo[i].getRegClass(TRI);
+ if (II.OpInfo[i].isOptionalDef()) {
+ // Optional def must be a physical register.
+ unsigned NumResults = CountResults(Node);
+ VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
+ assert(TargetRegisterInfo::isPhysicalRegister(VRBase));
+ MI->addOperand(MachineOperand::CreateReg(VRBase, true));
+ }
+
+ if (!VRBase && !IsClone && !IsCloned)
+ for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
+ UI != E; ++UI) {
+ SDNode *User = *UI;
+ if (User->getOpcode() == ISD::CopyToReg &&
+ User->getOperand(2).getNode() == Node &&
+ User->getOperand(2).getResNo() == i) {
+ unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
+ if (RegRC == RC) {
+ VRBase = Reg;
+ MI->addOperand(MachineOperand::CreateReg(Reg, true));
+ break;
+ }
+ }
+ }
+ }
+
+ // Create the result registers for this node and add the result regs to
+ // the machine instruction.
+ if (VRBase == 0) {
+ assert(RC && "Isn't a register operand!");
+ VRBase = MRI->createVirtualRegister(RC);
+ MI->addOperand(MachineOperand::CreateReg(VRBase, true));
+ }
+
+ SDValue Op(Node, i);
+ if (IsClone)
+ VRBaseMap.erase(Op);
+ bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
+ isNew = isNew; // Silence compiler warning.
+ assert(isNew && "Node emitted out of order - early");
+ }
+}
+
+/// getVR - Return the virtual register corresponding to the specified result
+/// of the specified node.
+unsigned InstrEmitter::getVR(SDValue Op,
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
+ if (Op.isMachineOpcode() &&
+ Op.getMachineOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
+ // Add an IMPLICIT_DEF instruction before every use.
+ unsigned VReg = getDstOfOnlyCopyToRegUse(Op.getNode(), Op.getResNo());
+ // IMPLICIT_DEF can produce any type of result so its TargetInstrDesc
+ // does not include operand register class info.
+ if (!VReg) {
+ const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
+ VReg = MRI->createVirtualRegister(RC);
+ }
+ BuildMI(MBB, Op.getDebugLoc(),
+ TII->get(TargetInstrInfo::IMPLICIT_DEF), VReg);
+ return VReg;
+ }
+
+ DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op);
+ assert(I != VRBaseMap.end() && "Node emitted out of order - late");
+ return I->second;
+}
+
+
+/// AddRegisterOperand - Add the specified register as an operand to the
+/// specified machine instr. Insert register copies if the register is
+/// not in the required register class.
+void
+InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
+ unsigned IIOpNum,
+ const TargetInstrDesc *II,
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
+ assert(Op.getValueType() != MVT::Other &&
+ Op.getValueType() != MVT::Flag &&
+ "Chain and flag operands should occur at end of operand list!");
+ // Get/emit the operand.
+ unsigned VReg = getVR(Op, VRBaseMap);
+ assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
+
+ const TargetInstrDesc &TID = MI->getDesc();
+ bool isOptDef = IIOpNum < TID.getNumOperands() &&
+ TID.OpInfo[IIOpNum].isOptionalDef();
+
+ // If the instruction requires a register in a different class, create
+ // a new virtual register and copy the value into it.
+ if (II) {
+ const TargetRegisterClass *SrcRC = MRI->getRegClass(VReg);
+ const TargetRegisterClass *DstRC = 0;
+ if (IIOpNum < II->getNumOperands())
+ DstRC = II->OpInfo[IIOpNum].getRegClass(TRI);
+ assert((DstRC || (TID.isVariadic() && IIOpNum >= TID.getNumOperands())) &&
+ "Don't have operand info for this instruction!");
+ if (DstRC && SrcRC != DstRC && !SrcRC->hasSuperClass(DstRC)) {
+ unsigned NewVReg = MRI->createVirtualRegister(DstRC);
+ bool Emitted = TII->copyRegToReg(*MBB, InsertPos, NewVReg, VReg,
+ DstRC, SrcRC);
+ assert(Emitted && "Unable to issue a copy instruction!\n");
+ (void) Emitted;
+ VReg = NewVReg;
+ }
+ }
+
+ MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef));
+}
+
+/// AddOperand - Add the specified operand to the specified machine instr. II
+/// specifies the instruction information for the node, and IIOpNum is the
+/// operand number (in the II) that we are adding. IIOpNum and II are used for
+/// assertions only.
+void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
+ unsigned IIOpNum,
+ const TargetInstrDesc *II,
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
+ if (Op.isMachineOpcode()) {
+ AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap);
+ } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateImm(C->getSExtValue()));
+ } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
+ const ConstantFP *CFP = F->getConstantFPValue();
+ MI->addOperand(MachineOperand::CreateFPImm(CFP));
+ } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
+ } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateGA(TGA->getGlobal(), TGA->getOffset(),
+ TGA->getTargetFlags()));
+ } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateMBB(BBNode->getBasicBlock()));
+ } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateFI(FI->getIndex()));
+ } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateJTI(JT->getIndex(),
+ JT->getTargetFlags()));
+ } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
+ int Offset = CP->getOffset();
+ unsigned Align = CP->getAlignment();
+ const Type *Type = CP->getType();
+ // MachineConstantPool wants an explicit alignment.
+ if (Align == 0) {
+ Align = TM->getTargetData()->getPrefTypeAlignment(Type);
+ if (Align == 0) {
+ // Alignment of vector types. FIXME!
+ Align = TM->getTargetData()->getTypeAllocSize(Type);
+ }
+ }
+
+ unsigned Idx;
+ MachineConstantPool *MCP = MF->getConstantPool();
+ if (CP->isMachineConstantPoolEntry())
+ Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Align);
+ else
+ Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Align);
+ MI->addOperand(MachineOperand::CreateCPI(Idx, Offset,
+ CP->getTargetFlags()));
+ } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
+ MI->addOperand(MachineOperand::CreateES(ES->getSymbol(),
+ ES->getTargetFlags()));
+ } else {
+ assert(Op.getValueType() != MVT::Other &&
+ Op.getValueType() != MVT::Flag &&
+ "Chain and flag operands should occur at end of operand list!");
+ AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap);
+ }
+}
+
+/// getSuperRegisterRegClass - Returns the register class of a superreg A whose
+/// "SubIdx"'th sub-register class is the specified register class and whose
+/// type matches the specified type.
+static const TargetRegisterClass*
+getSuperRegisterRegClass(const TargetRegisterClass *TRC,
+ unsigned SubIdx, EVT VT) {
+ // Pick the register class of the superregister for this type
+ for (TargetRegisterInfo::regclass_iterator I = TRC->superregclasses_begin(),
+ E = TRC->superregclasses_end(); I != E; ++I)
+ if ((*I)->hasType(VT) && (*I)->getSubRegisterRegClass(SubIdx) == TRC)
+ return *I;
+ assert(false && "Couldn't find the register class");
+ return 0;
+}
+
+/// EmitSubregNode - Generate machine code for subreg nodes.
+///
+void InstrEmitter::EmitSubregNode(SDNode *Node,
+ DenseMap<SDValue, unsigned> &VRBaseMap){
+ unsigned VRBase = 0;
+ unsigned Opc = Node->getMachineOpcode();
+
+ // If the node is only used by a CopyToReg and the dest reg is a vreg, use
+ // the CopyToReg'd destination register instead of creating a new vreg.
+ for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
+ UI != E; ++UI) {
+ SDNode *User = *UI;
+ if (User->getOpcode() == ISD::CopyToReg &&
+ User->getOperand(2).getNode() == Node) {
+ unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
+ if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
+ VRBase = DestReg;
+ break;
+ }
+ }
+ }
+
+ if (Opc == TargetInstrInfo::EXTRACT_SUBREG) {
+ unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+
+ // Create the extract_subreg machine instruction.
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
+ TII->get(TargetInstrInfo::EXTRACT_SUBREG));
+
+ // Figure out the register class to create for the destreg.
+ unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
+ const TargetRegisterClass *TRC = MRI->getRegClass(VReg);
+ const TargetRegisterClass *SRC = TRC->getSubRegisterRegClass(SubIdx);
+ assert(SRC && "Invalid subregister index in EXTRACT_SUBREG");
+
+ // Figure out the register class to create for the destreg.
+ // Note that if we're going to directly use an existing register,
+ // it must be precisely the required class, and not a subclass
+ // thereof.
+ if (VRBase == 0 || SRC != MRI->getRegClass(VRBase)) {
+ // Create the reg
+ assert(SRC && "Couldn't find source register class");
+ VRBase = MRI->createVirtualRegister(SRC);
+ }
+
+ // Add def, source, and subreg index
+ MI->addOperand(MachineOperand::CreateReg(VRBase, true));
+ AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap);
+ MI->addOperand(MachineOperand::CreateImm(SubIdx));
+ MBB->insert(InsertPos, MI);
+ } else if (Opc == TargetInstrInfo::INSERT_SUBREG ||
+ Opc == TargetInstrInfo::SUBREG_TO_REG) {
+ SDValue N0 = Node->getOperand(0);
+ SDValue N1 = Node->getOperand(1);
+ SDValue N2 = Node->getOperand(2);
+ unsigned SubReg = getVR(N1, VRBaseMap);
+ unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
+ const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
+ const TargetRegisterClass *SRC =
+ getSuperRegisterRegClass(TRC, SubIdx,
+ Node->getValueType(0));
+
+ // Figure out the register class to create for the destreg.
+ // Note that if we're going to directly use an existing register,
+ // it must be precisely the required class, and not a subclass
+ // thereof.
+ if (VRBase == 0 || SRC != MRI->getRegClass(VRBase)) {
+ // Create the reg
+ assert(SRC && "Couldn't find source register class");
+ VRBase = MRI->createVirtualRegister(SRC);
+ }
+
+ // Create the insert_subreg or subreg_to_reg machine instruction.
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc));
+ MI->addOperand(MachineOperand::CreateReg(VRBase, true));
+
+ // If creating a subreg_to_reg, then the first input operand
+ // is an implicit value immediate; otherwise it's a register.
+ if (Opc == TargetInstrInfo::SUBREG_TO_REG) {
+ const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
+ MI->addOperand(MachineOperand::CreateImm(SD->getZExtValue()));
+ } else
+ AddOperand(MI, N0, 0, 0, VRBaseMap);
+ // Add the subregister being inserted.
+ AddOperand(MI, N1, 0, 0, VRBaseMap);
+ MI->addOperand(MachineOperand::CreateImm(SubIdx));
+ MBB->insert(InsertPos, MI);
+ } else
+ llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");
+
+ SDValue Op(Node, 0);
+ bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
+ isNew = isNew; // Silence compiler warning.
+ assert(isNew && "Node emitted out of order - early");
+}
+
+/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
+/// COPY_TO_REGCLASS is just a normal copy, except that the destination
+/// register is constrained to be in a particular register class.
+///
+void
+InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
+ unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
+ const TargetRegisterClass *SrcRC = MRI->getRegClass(VReg);
+
+ unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ const TargetRegisterClass *DstRC = TRI->getRegClass(DstRCIdx);
+
+ // Create the new VReg in the destination class and emit a copy.
+ unsigned NewVReg = MRI->createVirtualRegister(DstRC);
+ bool Emitted = TII->copyRegToReg(*MBB, InsertPos, NewVReg, VReg,
+ DstRC, SrcRC);
+ assert(Emitted &&
+ "Unable to issue a copy instruction for a COPY_TO_REGCLASS node!\n");
+ (void) Emitted;
+
+ SDValue Op(Node, 0);
+ bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
+ isNew = isNew; // Silence compiler warning.
+ assert(isNew && "Node emitted out of order - early");
+}
+
+/// EmitNode - Generate machine code for a node and its needed dependencies.
+///
+void InstrEmitter::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
+ // If this node carries a machine opcode, build the MachineInstr for it here.
+ if (Node->isMachineOpcode()) {
+ unsigned Opc = Node->getMachineOpcode();
+
+ // Handle subreg insert/extract specially
+ if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
+ Opc == TargetInstrInfo::INSERT_SUBREG ||
+ Opc == TargetInstrInfo::SUBREG_TO_REG) {
+ EmitSubregNode(Node, VRBaseMap);
+ return;
+ }
+
+ // Handle COPY_TO_REGCLASS specially.
+ if (Opc == TargetInstrInfo::COPY_TO_REGCLASS) {
+ EmitCopyToRegClassNode(Node, VRBaseMap);
+ return;
+ }
+
+ if (Opc == TargetInstrInfo::IMPLICIT_DEF)
+ // We want a unique VR for each IMPLICIT_DEF use.
+ return;
+
+ const TargetInstrDesc &II = TII->get(Opc);
+ unsigned NumResults = CountResults(Node);
+ unsigned NodeOperands = CountOperands(Node);
+ bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
+ II.getImplicitDefs() != 0;
+#ifndef NDEBUG
+ unsigned NumMIOperands = NodeOperands + NumResults;
+ assert((II.getNumOperands() == NumMIOperands ||
+ HasPhysRegOuts || II.isVariadic()) &&
+ "#operands for dag node doesn't match .td file!");
+#endif
+
+ // Create the new machine instruction.
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), II);
+
+ // Add result register values for things that are defined by this
+ // instruction.
+ if (NumResults)
+ CreateVirtualRegisters(Node, MI, II, IsClone, IsCloned, VRBaseMap);
+
+ // Emit all of the actual operands of this instruction, adding them to the
+ // instruction as appropriate.
+ bool HasOptPRefs = II.getNumDefs() > NumResults;
+ assert((!HasOptPRefs || !HasPhysRegOuts) &&
+ "Unable to cope with optional defs and phys regs defs!");
+ unsigned NumSkip = HasOptPRefs ? II.getNumDefs() - NumResults : 0;
+ for (unsigned i = NumSkip; i != NodeOperands; ++i)
+ AddOperand(MI, Node->getOperand(i), i-NumSkip+II.getNumDefs(), &II,
+ VRBaseMap);
+
+ // Transfer all of the memory reference descriptions of this instruction.
+ MI->setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
+ cast<MachineSDNode>(Node)->memoperands_end());
+
+ if (II.usesCustomDAGSchedInsertionHook()) {
+ // Insert this instruction into the basic block using a target-specific
+ // inserter, which may return a new basic block.
+ MBB = TLI->EmitInstrWithCustomInserter(MI, MBB, EM);
+ InsertPos = MBB->end();
+ } else {
+ MBB->insert(InsertPos, MI);
+ }
+
+ // Additional results must be physical register defs.
+ if (HasPhysRegOuts) {
+ for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
+ unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
+ if (Node->hasAnyUseOfValue(i))
+ EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
+ }
+ }
+ return;
+ }
+
+ switch (Node->getOpcode()) {
+ default:
+#ifndef NDEBUG
+ Node->dump();
+#endif
+ llvm_unreachable("This target-independent node should have been selected!");
+ break;
+ case ISD::EntryToken:
+ llvm_unreachable("EntryToken should have been excluded from the schedule!");
+ break;
+ case ISD::MERGE_VALUES:
+ case ISD::TokenFactor: // fall thru
+ break;
+ case ISD::CopyToReg: {
+ unsigned SrcReg;
+ SDValue SrcVal = Node->getOperand(2);
+ if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
+ SrcReg = R->getReg();
+ else
+ SrcReg = getVR(SrcVal, VRBaseMap);
+
+ unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
+ if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
+ break;
+
+ const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
+ // Get the register classes of the src/dst.
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg))
+ SrcTRC = MRI->getRegClass(SrcReg);
+ else
+ SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
+
+ if (TargetRegisterInfo::isVirtualRegister(DestReg))
+ DstTRC = MRI->getRegClass(DestReg);
+ else
+ DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
+ Node->getOperand(1).getValueType());
+
+ bool Emitted = TII->copyRegToReg(*MBB, InsertPos, DestReg, SrcReg,
+ DstTRC, SrcTRC);
+ assert(Emitted && "Unable to issue a copy instruction!\n");
+ (void) Emitted;
+ break;
+ }
+ case ISD::CopyFromReg: {
+ unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
+ EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
+ break;
+ }
+ case ISD::INLINEASM: {
+ unsigned NumOps = Node->getNumOperands();
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
+ --NumOps; // Ignore the flag operand.
+
+ // Create the inline asm machine instruction.
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
+ TII->get(TargetInstrInfo::INLINEASM));
+
+ // Add the asm string as an external symbol operand.
+ const char *AsmStr =
+ cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
+ MI->addOperand(MachineOperand::CreateES(AsmStr));
+
+ // Add all of the operand registers to the instruction.
+ for (unsigned i = 2; i != NumOps;) {
+ unsigned Flags =
+ cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+ unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+
+ MI->addOperand(MachineOperand::CreateImm(Flags));
+ ++i; // Skip the ID value.
+
+ switch (Flags & 7) {
+ default: llvm_unreachable("Bad flags!");
+ case 2: // Def of register.
+ for (; NumVals; --NumVals, ++i) {
+ unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
+ MI->addOperand(MachineOperand::CreateReg(Reg, true));
+ }
+ break;
+ case 6: // Def of earlyclobber register.
+ for (; NumVals; --NumVals, ++i) {
+ unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
+ MI->addOperand(MachineOperand::CreateReg(Reg, true, false, false,
+ false, false, true));
+ }
+ break;
+ case 1: // Use of register.
+ case 3: // Immediate.
+ case 4: // Addressing mode.
+ // The addressing mode has been selected, just add all of the
+ // operands to the machine instruction.
+ for (; NumVals; --NumVals, ++i)
+ AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
+ break;
+ }
+ }
+ MBB->insert(InsertPos, MI);
+ break;
+ }
+ }
+}
+
+/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
+/// at the given position in the given block.
+InstrEmitter::InstrEmitter(MachineBasicBlock *mbb,
+ MachineBasicBlock::iterator insertpos)
+ : MF(mbb->getParent()),
+ MRI(&MF->getRegInfo()),
+ TM(&MF->getTarget()),
+ TII(TM->getInstrInfo()),
+ TRI(TM->getRegisterInfo()),
+ TLI(TM->getTargetLowering()),
+ MBB(mbb), InsertPos(insertpos) {
+}
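
The InstrEmitter defined above is driven externally: a caller constructs it at an insertion point, feeds it nodes in already-scheduled order, and threads a single SDValue-to-virtual-register map through every EmitNode call. Below is a minimal sketch of such a driver loop using only the interface this patch introduces; the helper name EmitScheduledNodes and the use of a plain std::vector for the scheduled sequence are illustrative assumptions, not part of the patch.

#include "InstrEmitter.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include <vector>

using namespace llvm;

// Emit an already-scheduled list of nodes into MBB starting at InsertPos.
// Returns the block the emitter finished in, since an instruction that uses
// a custom inserter may have switched emission to a new block.
static MachineBasicBlock *
EmitScheduledNodes(const std::vector<SDNode*> &Sequence,
                   MachineBasicBlock *MBB,
                   MachineBasicBlock::iterator InsertPos,
                   DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  InstrEmitter Emitter(MBB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;  // SDValue -> assigned virtual reg.
  for (std::vector<SDNode*>::const_iterator I = Sequence.begin(),
       E = Sequence.end(); I != E; ++I)
    Emitter.EmitNode(*I, /*IsClone=*/false, /*IsCloned=*/false, VRBaseMap, EM);
  return Emitter.getBlock();
}
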
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.h b/lib/CodeGen/SelectionDAG/InstrEmitter.h
new file mode 100644
index 0000000..bb4634d
--- /dev/null
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.h
@@ -0,0 +1,119 @@
+//===---- InstrEmitter.h - Emit MachineInstrs for the SelectionDAG class ---==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This declares the Emit routines for the SelectionDAG class, which creates
+// MachineInstrs based on the decisions of the SelectionDAG instruction
+// selection.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INSTREMITTER_H
+#define INSTREMITTER_H
+
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+
+class TargetInstrDesc;
+
+class InstrEmitter {
+ MachineFunction *MF;
+ MachineRegisterInfo *MRI;
+ const TargetMachine *TM;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ const TargetLowering *TLI;
+
+ MachineBasicBlock *MBB;
+ MachineBasicBlock::iterator InsertPos;
+
+ /// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
+ /// implicit physical register output.
+ void EmitCopyFromReg(SDNode *Node, unsigned ResNo,
+ bool IsClone, bool IsCloned,
+ unsigned SrcReg,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
+
+ /// getDstOfOnlyCopyToRegUse - If the only use of the specified result number
+ /// of the node is a CopyToReg, return its destination register. Return 0
+ /// otherwise.
+ unsigned getDstOfOnlyCopyToRegUse(SDNode *Node,
+ unsigned ResNo) const;
+
+ void CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
+ const TargetInstrDesc &II,
+ bool IsClone, bool IsCloned,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
+
+ /// getVR - Return the virtual register corresponding to the specified result
+ /// of the specified node.
+ unsigned getVR(SDValue Op,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
+
+ /// AddRegisterOperand - Add the specified register as an operand to the
+ /// specified machine instr. Insert register copies if the register is
+ /// not in the required register class.
+ void AddRegisterOperand(MachineInstr *MI, SDValue Op,
+ unsigned IIOpNum,
+ const TargetInstrDesc *II,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
+
+ /// AddOperand - Add the specified operand to the specified machine instr. II
+ /// specifies the instruction information for the node, and IIOpNum is the
+ /// operand number (in the II) that we are adding. IIOpNum and II are used for
+ /// assertions only.
+ void AddOperand(MachineInstr *MI, SDValue Op,
+ unsigned IIOpNum,
+ const TargetInstrDesc *II,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
+
+ /// EmitSubregNode - Generate machine code for subreg nodes.
+ ///
+ void EmitSubregNode(SDNode *Node, DenseMap<SDValue, unsigned> &VRBaseMap);
+
+ /// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
+ /// COPY_TO_REGCLASS is just a normal copy, except that the destination
+ /// register is constrained to be in a particular register class.
+ ///
+ void EmitCopyToRegClassNode(SDNode *Node,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
+
+public:
+ /// CountResults - The results of target nodes have register or immediate
+ /// operands first, then an optional chain, and optional flag operands
+ /// (which do not go into the machine instrs.)
+ static unsigned CountResults(SDNode *Node);
+
+ /// CountOperands - The inputs to target nodes have any actual inputs first,
+ /// followed by an optional chain operand, then flag operands. Compute
+ /// the number of actual operands that will go into the resulting
+ /// MachineInstr.
+ static unsigned CountOperands(SDNode *Node);
+
+ /// EmitNode - Generate machine code for a node and its needed dependencies.
+ ///
+ void EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM);
+
+ /// getBlock - Return the current basic block.
+ MachineBasicBlock *getBlock() { return MBB; }
+
+ /// getInsertPos - Return the current insertion position.
+ MachineBasicBlock::iterator getInsertPos() { return InsertPos; }
+
+ /// InstrEmitter - Construct an InstrEmitter and set it to start inserting
+ /// at the given position in the given block.
+ InstrEmitter(MachineBasicBlock *mbb, MachineBasicBlock::iterator insertpos);
+};
+
+}
+
+#endif
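
For reference, the ISD::INLINEASM case added to EmitNode above walks the operand list in groups: a constant flag word whose low three bits give the operand kind (register def, earlyclobber def, register use, immediate, or addressing-mode operands), followed by InlineAsm::getNumOperandRegisters(Flags) values. The sketch below repeats that walk using only the accessors the patch itself uses; the function name DumpAsmOperandGroups is hypothetical.

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Walk the operand groups of an ISD::INLINEASM node. NumOps should already
// exclude a trailing flag operand, as in EmitNode.
static void DumpAsmOperandGroups(SDNode *Node, unsigned NumOps) {
  // Operand 0 is the chain and operand 1 the asm string; groups start at 2.
  for (unsigned i = 2; i != NumOps; ) {
    unsigned Flags =
      cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
    unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
    ++i;            // Skip the flag word itself.
    errs() << "kind " << (Flags & 7) << ", " << NumVals << " value(s)\n";
    i += NumVals;   // Skip the values belonging to this group.
  }
}
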
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 1413d95..fc01b07 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -30,9 +30,12 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -98,13 +101,14 @@ public:
/// getTypeAction - Return how we should legalize values of this type, either
/// it is already legal or we need to expand it into multiple registers of
/// smaller integer type, or we need to promote it to a larger type.
- LegalizeAction getTypeAction(MVT VT) const {
- return (LegalizeAction)ValueTypeActions.getTypeAction(VT);
+ LegalizeAction getTypeAction(EVT VT) const {
+ return
+ (LegalizeAction)ValueTypeActions.getTypeAction(*DAG.getContext(), VT);
}
/// isTypeLegal - Return true if this type is legal on this target.
///
- bool isTypeLegal(MVT VT) const {
+ bool isTypeLegal(EVT VT) const {
return getTypeAction(VT) == Legal;
}
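
These LegalizeDAG changes are largely a mechanical migration from MVT to EVT: extended value types are owned by an LLVMContext, so every getIntegerVT/getTypeFor call now threads *DAG.getContext() through, and raw enum arithmetic goes via getSimpleVT().SimpleTy. A minimal sketch of the new pattern follows; the helper name is illustrative only.

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/ValueTypes.h"

using namespace llvm;

// Old form: MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
// New form: extended integer types need the LLVMContext held by the DAG.
static EVT getIntegerTypeOfSameSize(SelectionDAG &DAG, EVT VT) {
  return EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
}
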
@@ -131,14 +135,14 @@ private:
 /// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
- SDValue ShuffleWithNarrowerEltType(MVT NVT, MVT VT, DebugLoc dl,
+ SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
SDValue N1, SDValue N2,
SmallVectorImpl<int> &Mask) const;
bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
SmallPtrSet<SDNode*, 32> &NodesLeadingTo);
- void LegalizeSetCCCondCode(MVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
+ void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
DebugLoc dl);
SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
@@ -149,18 +153,18 @@ private:
RTLIB::Libcall Call_I32, RTLIB::Libcall Call_I64,
RTLIB::Libcall Call_I128);
- SDValue EmitStackConvert(SDValue SrcOp, MVT SlotVT, MVT DestVT, DebugLoc dl);
+ SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
SDValue ExpandBUILD_VECTOR(SDNode *Node);
SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
SDValue ExpandDBG_STOPPOINT(SDNode *Node);
void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
SmallVectorImpl<SDValue> &Results);
SDValue ExpandFCOPYSIGN(SDNode *Node);
- SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, MVT DestVT,
+ SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
DebugLoc dl);
- SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, MVT DestVT, bool isSigned,
+ SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
DebugLoc dl);
- SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, MVT DestVT, bool isSigned,
+ SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
DebugLoc dl);
SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
@@ -179,10 +183,10 @@ private:
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
-SelectionDAGLegalize::ShuffleWithNarrowerEltType(MVT NVT, MVT VT, DebugLoc dl,
+SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
SDValue N1, SDValue N2,
SmallVectorImpl<int> &Mask) const {
- MVT EltVT = NVT.getVectorElementType();
+ EVT EltVT = NVT.getVectorElementType();
unsigned NumMaskElts = VT.getVectorNumElements();
unsigned NumDestElts = NVT.getVectorNumElements();
unsigned NumEltsGrowth = NumDestElts / NumMaskElts;
@@ -342,7 +346,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
// double. This shrinks FP constants and canonicalizes them for targets where
// an FP extending load is the same cost as a normal load (such as on the x87
// fp stack or PPC FP unit).
- MVT VT = CFP->getValueType(0);
+ EVT VT = CFP->getValueType(0);
ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
if (!UseCP) {
assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
@@ -350,16 +354,16 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
(VT == MVT::f64) ? MVT::i64 : MVT::i32);
}
- MVT OrigVT = VT;
- MVT SVT = VT;
+ EVT OrigVT = VT;
+ EVT SVT = VT;
while (SVT != MVT::f32) {
- SVT = (MVT::SimpleValueType)(SVT.getSimpleVT() - 1);
+ SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
// Only do this if the target has a native EXTLOAD instruction from
// smaller type.
TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
TLI.ShouldShrinkFPConstant(OrigVT)) {
- const Type *SType = SVT.getTypeForMVT();
+ const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
VT = SVT;
Extend = true;
@@ -384,13 +388,13 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
SDValue Val = ST->getValue();
- MVT VT = Val.getValueType();
+ EVT VT = Val.getValueType();
int Alignment = ST->getAlignment();
int SVOffset = ST->getSrcValueOffset();
DebugLoc dl = ST->getDebugLoc();
if (ST->getMemoryVT().isFloatingPoint() ||
ST->getMemoryVT().isVector()) {
- MVT intVT = MVT::getIntegerVT(VT.getSizeInBits());
+ EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
if (TLI.isTypeLegal(intVT)) {
// Expand to a bitconvert of the value to the integer type of the
// same size, then a (misaligned) int store.
@@ -401,9 +405,9 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
} else {
// Do a (aligned) store to a stack slot, then copy from the stack slot
// to the final destination using (unaligned) integer loads and stores.
- MVT StoredVT = ST->getMemoryVT();
- MVT RegVT =
- TLI.getRegisterType(MVT::getIntegerVT(StoredVT.getSizeInBits()));
+ EVT StoredVT = ST->getMemoryVT();
+ EVT RegVT =
+ TLI.getRegisterType(*DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), StoredVT.getSizeInBits()));
unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
unsigned RegBytes = RegVT.getSizeInBits() / 8;
unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
@@ -437,7 +441,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// The last store may be partial. Do a truncating store. On big-endian
// machines this requires an extending load from the stack slot to ensure
// that the bits are in the right place.
- MVT MemVT = MVT::getIntegerVT(8 * (StoredBytes - Offset));
+ EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));
// Load from the stack slot.
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
@@ -456,8 +460,8 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
!ST->getMemoryVT().isVector() &&
"Unaligned store of unknown type.");
// Get the half-size VT
- MVT NewStoredVT =
- (MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT() - 1);
+ EVT NewStoredVT =
+ (MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT().SimpleTy - 1);
int NumBits = NewStoredVT.getSizeInBits();
int IncrementSize = NumBits / 8;
@@ -488,11 +492,11 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
int SVOffset = LD->getSrcValueOffset();
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
- MVT VT = LD->getValueType(0);
- MVT LoadedVT = LD->getMemoryVT();
+ EVT VT = LD->getValueType(0);
+ EVT LoadedVT = LD->getMemoryVT();
DebugLoc dl = LD->getDebugLoc();
if (VT.isFloatingPoint() || VT.isVector()) {
- MVT intVT = MVT::getIntegerVT(LoadedVT.getSizeInBits());
+ EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
if (TLI.isTypeLegal(intVT)) {
// Expand to a (misaligned) integer load of the same size,
// then bitconvert to floating point or vector.
@@ -508,7 +512,7 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
} else {
// Copy the value to a (aligned) stack slot using (unaligned) integer
// loads and stores, then do a (aligned) load from the stack slot.
- MVT RegVT = TLI.getRegisterType(intVT);
+ EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
unsigned RegBytes = RegVT.getSizeInBits() / 8;
unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
@@ -538,7 +542,7 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
}
// The last copy may be partial. Do an extending load.
- MVT MemVT = MVT::getIntegerVT(8 * (LoadedBytes - Offset));
+ EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8 * (LoadedBytes - Offset));
SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
LD->getSrcValue(), SVOffset + Offset,
MemVT, LD->isVolatile(),
@@ -568,8 +572,8 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
// Compute the new VT that is half the size of the old one. This is an
// integer MVT.
unsigned NumBits = LoadedVT.getSizeInBits();
- MVT NewLoadedVT;
- NewLoadedVT = MVT::getIntegerVT(NumBits/2);
+ EVT NewLoadedVT;
+ NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
NumBits >>= 1;
unsigned Alignment = LD->getAlignment();
@@ -629,10 +633,10 @@ PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
// with a "move to register" or "extload into register" instruction, then
// permute it into place, if the idx is a constant and if the idx is
// supported by the target.
- MVT VT = Tmp1.getValueType();
- MVT EltVT = VT.getVectorElementType();
- MVT IdxVT = Tmp3.getValueType();
- MVT PtrVT = TLI.getPointerTy();
+ EVT VT = Tmp1.getValueType();
+ EVT EltVT = VT.getVectorElementType();
+ EVT IdxVT = Tmp3.getValueType();
+ EVT PtrVT = TLI.getPointerTy();
SDValue StackPtr = DAG.CreateStackTemporary(VT);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
@@ -663,7 +667,7 @@ ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) {
// SCALAR_TO_VECTOR requires that the type of the value being inserted
// match the element type of the vector being created, except for
// integers in which case the inserted value can be over width.
- MVT EltVT = Vec.getValueType().getVectorElementType();
+ EVT EltVT = Vec.getValueType().getVectorElementType();
if (Val.getValueType() == EltVT ||
(EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
@@ -785,7 +789,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break;
case ISD::FP_ROUND_INREG:
case ISD::SIGN_EXTEND_INREG: {
- MVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
+ EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
break;
}
@@ -795,7 +799,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
Node->getOpcode() == ISD::SETCC ? 2 : 1;
unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
- MVT OpVT = Node->getOperand(CompareOperand).getValueType();
+ EVT OpVT = Node->getOperand(CompareOperand).getValueType();
ISD::CondCode CCCode =
cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
Action = TLI.getCondCodeAction(CCCode, OpVT);
@@ -821,11 +825,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// special case should be done as part of making LegalizeDAG non-recursive.
SimpleFinishLegalizing = false;
break;
- case ISD::CALL:
- // FIXME: Legalization for calls requires custom-lowering the call before
- // legalizing the operands! (I haven't looked into precisely why.)
- SimpleFinishLegalizing = false;
- break;
case ISD::EXTRACT_ELEMENT:
case ISD::FLT_ROUNDS_:
case ISD::SADDO:
@@ -847,7 +846,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::TRAMPOLINE:
case ISD::FRAMEADDR:
case ISD::RETURNADDR:
- case ISD::FORMAL_ARGUMENTS:
// These operations lie about being legal: when they claim to be legal,
// they should actually be custom-lowered.
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
@@ -885,7 +883,6 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::BR_JT:
case ISD::BR_CC:
case ISD::BRCOND:
- case ISD::RET:
// Branches tweak the chain to include LastCALLSEQ_END
Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
LastCALLSEQ_END);
@@ -902,6 +899,14 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (!Ops[1].getValueType().isVector())
Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[1]));
break;
+ case ISD::SRL_PARTS:
+ case ISD::SRA_PARTS:
+ case ISD::SHL_PARTS:
+ // Legalizing shifts/rotates requires adjusting the shift amount
+ // to the appropriate width.
+ if (!Ops[2].getValueType().isVector())
+ Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[2]));
+ break;
}
Result = DAG.UpdateNodeOperands(Result.getValue(0), Ops.data(),
@@ -946,44 +951,15 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
switch (Node->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "NODE: "; Node->dump(&DAG); cerr << "\n";
+ errs() << "NODE: ";
+ Node->dump(&DAG);
+ errs() << "\n";
#endif
- assert(0 && "Do not know how to legalize this operator!");
- abort();
- case ISD::CALL:
- // The only option for this is to custom lower it.
- Tmp3 = TLI.LowerOperation(Result.getValue(0), DAG);
- assert(Tmp3.getNode() && "Target didn't custom lower this node!");
- // A call within a calling sequence must be legalized to something
- // other than the normal CALLSEQ_END. Violating this gets Legalize
- // into an infinite loop.
- assert ((!IsLegalizingCall ||
- Node->getOpcode() != ISD::CALL ||
- Tmp3.getNode()->getOpcode() != ISD::CALLSEQ_END) &&
- "Nested CALLSEQ_START..CALLSEQ_END not supported.");
-
- // The number of incoming and outgoing values should match; unless the final
- // outgoing value is a flag.
- assert((Tmp3.getNode()->getNumValues() == Result.getNode()->getNumValues() ||
- (Tmp3.getNode()->getNumValues() == Result.getNode()->getNumValues() + 1 &&
- Tmp3.getNode()->getValueType(Tmp3.getNode()->getNumValues() - 1) ==
- MVT::Flag)) &&
- "Lowering call/formal_arguments produced unexpected # results!");
-
- // Since CALL/FORMAL_ARGUMENTS nodes produce multiple values, make sure to
- // remember that we legalized all of them, so it doesn't get relegalized.
- for (unsigned i = 0, e = Tmp3.getNode()->getNumValues(); i != e; ++i) {
- if (Tmp3.getNode()->getValueType(i) == MVT::Flag)
- continue;
- Tmp1 = LegalizeOp(Tmp3.getValue(i));
- if (Op.getResNo() == i)
- Tmp2 = Tmp1;
- AddLegalizedOperand(SDValue(Node, i), Tmp1);
- }
- return Tmp2;
+ llvm_unreachable("Do not know how to legalize this operator!");
+
case ISD::BUILD_VECTOR:
switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Custom:
Tmp3 = TLI.LowerOperation(Result, DAG);
if (Tmp3.getNode()) {
@@ -1094,22 +1070,22 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD) {
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
Tmp3 = Result.getValue(0);
Tmp4 = Result.getValue(1);
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned load and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses()) {
- unsigned ABIAlignment = TLI.getTargetData()->
- getABITypeAlignment(LD->getMemoryVT().getTypeForMVT());
+ if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()), DAG,
- TLI);
+ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
+ DAG, TLI);
Tmp3 = Result.getOperand(0);
Tmp4 = Result.getOperand(1);
Tmp3 = LegalizeOp(Tmp3);
@@ -1128,7 +1104,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Only promote a load of vector type to another.
assert(VT.isVector() && "Cannot promote this load!");
// Change base type to a different vector type.
- MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
+ EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
LD->getSrcValueOffset(),
@@ -1144,7 +1120,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
AddLegalizedOperand(SDValue(Node, 1), Tmp4);
return Op.getResNo() ? Tmp4 : Tmp3;
} else {
- MVT SrcVT = LD->getMemoryVT();
+ EVT SrcVT = LD->getMemoryVT();
unsigned SrcWidth = SrcVT.getSizeInBits();
int SVOffset = LD->getSrcValueOffset();
unsigned Alignment = LD->getAlignment();
@@ -1163,7 +1139,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Promote to a byte-sized load if not loading an integral number of
// bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
unsigned NewWidth = SrcVT.getStoreSizeInBits();
- MVT NVT = MVT::getIntegerVT(NewWidth);
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
SDValue Ch;
// The extra bits are guaranteed to be zero, since we stored them that
@@ -1201,8 +1177,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
assert(ExtraWidth < RoundWidth);
assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
"Load size not an integral number of bytes!");
- MVT RoundVT = MVT::getIntegerVT(RoundWidth);
- MVT ExtraVT = MVT::getIntegerVT(ExtraWidth);
+ EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
+ EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
SDValue Lo, Hi, Ch;
unsigned IncrementSize;
@@ -1269,7 +1245,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp2 = LegalizeOp(Ch);
} else {
switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Custom:
isCustom = true;
// FALLTHROUGH
@@ -1287,12 +1263,12 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
} else {
// If this is an unaligned load and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses()) {
- unsigned ABIAlignment = TLI.getTargetData()->
- getABITypeAlignment(LD->getMemoryVT().getTypeForMVT());
+ if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
+ const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()), DAG,
- TLI);
+ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
+ DAG, TLI);
Tmp1 = Result.getOperand(0);
Tmp2 = Result.getOperand(1);
Tmp1 = LegalizeOp(Tmp1);
@@ -1303,10 +1279,13 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break;
case TargetLowering::Expand:
// f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
- if (SrcVT == MVT::f32 && Node->getValueType(0) == MVT::f64) {
+ // f128 = EXTLOAD {f32,f64} too
+ if ((SrcVT == MVT::f32 && (Node->getValueType(0) == MVT::f64 ||
+ Node->getValueType(0) == MVT::f128)) ||
+ (SrcVT == MVT::f64 && Node->getValueType(0) == MVT::f128)) {
SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
- LD->getSrcValueOffset(),
- LD->isVolatile(), LD->getAlignment());
+ LD->getSrcValueOffset(),
+ LD->isVolatile(), LD->getAlignment());
Result = DAG.getNode(ISD::FP_EXTEND, dl,
Node->getValueType(0), Load);
Tmp1 = LegalizeOp(Result); // Relegalize new nodes.
@@ -1359,18 +1338,18 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
ST->getOffset());
- MVT VT = Tmp3.getValueType();
+ EVT VT = Tmp3.getValueType();
switch (TLI.getOperationAction(ISD::STORE, VT)) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses()) {
- unsigned ABIAlignment = TLI.getTargetData()->
- getABITypeAlignment(ST->getMemoryVT().getTypeForMVT());
+ if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
- Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()), DAG,
- TLI);
+ Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
+ DAG, TLI);
}
break;
case TargetLowering::Custom:
@@ -1391,14 +1370,14 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
} else {
Tmp3 = LegalizeOp(ST->getValue());
- MVT StVT = ST->getMemoryVT();
+ EVT StVT = ST->getMemoryVT();
unsigned StWidth = StVT.getSizeInBits();
if (StWidth != StVT.getStoreSizeInBits()) {
// Promote to a byte-sized store with upper bits zero if not
// storing an integral number of bytes. For example, promote
// TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
- MVT NVT = MVT::getIntegerVT(StVT.getStoreSizeInBits());
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StVT.getStoreSizeInBits());
Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
SVOffset, NVT, isVolatile, Alignment);
@@ -1412,8 +1391,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
assert(ExtraWidth < RoundWidth);
assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
"Store size not an integral number of bytes!");
- MVT RoundVT = MVT::getIntegerVT(RoundWidth);
- MVT ExtraVT = MVT::getIntegerVT(ExtraWidth);
+ EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
+ EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
SDValue Lo, Hi;
unsigned IncrementSize;
@@ -1460,16 +1439,16 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
ST->getOffset());
switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
- default: assert(0 && "This action is not supported yet!");
+ default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsUnalignedMemoryAccesses()) {
- unsigned ABIAlignment = TLI.getTargetData()->
- getABITypeAlignment(ST->getMemoryVT().getTypeForMVT());
+ if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
+ const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
- Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()), DAG,
- TLI);
+ Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
+ DAG, TLI);
}
break;
case TargetLowering::Custom:
@@ -1522,7 +1501,11 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);
- return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0);
+ if (Op.getValueType().isVector())
+ return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0);
+ else
+ return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
+ NULL, 0, Vec.getValueType().getVectorElementType());
}
SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
@@ -1530,8 +1513,8 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
// aligned object on the stack, store each element into it, then load
// the result as a vector.
// Create the stack frame object.
- MVT VT = Node->getValueType(0);
- MVT OpVT = Node->getOperand(0).getValueType();
+ EVT VT = Node->getValueType(0);
+ EVT OpVT = Node->getOperand(0).getValueType();
DebugLoc dl = Node->getDebugLoc();
SDValue FIPtr = DAG.CreateStackTemporary(VT);
int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
@@ -1574,7 +1557,7 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
"Ugly special-cased code!");
// Get the sign bit of the RHS.
SDValue SignBit;
- MVT IVT = Tmp2.getValueType() == MVT::f64 ? MVT::i64 : MVT::i32;
+ EVT IVT = Tmp2.getValueType() == MVT::f64 ? MVT::i64 : MVT::i32;
if (isTypeLegal(IVT)) {
SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2);
} else {
@@ -1613,9 +1596,8 @@ SDValue SelectionDAGLegalize::ExpandDBG_STOPPOINT(SDNode* Node) {
bool useLABEL = TLI.isOperationLegalOrCustom(ISD::DBG_LABEL, MVT::Other);
const DbgStopPointSDNode *DSP = cast<DbgStopPointSDNode>(Node);
- GlobalVariable *CU_GV = cast<GlobalVariable>(DSP->getCompileUnit());
- if (DW && (useDEBUG_LOC || useLABEL) && !CU_GV->isDeclaration()) {
- DICompileUnit CU(cast<GlobalVariable>(DSP->getCompileUnit()));
+ MDNode *CU_Node = DSP->getCompileUnit();
+ if (DW && (useDEBUG_LOC || useLABEL)) {
unsigned Line = DSP->getLine();
unsigned Col = DSP->getColumn();
@@ -1627,9 +1609,9 @@ SDValue SelectionDAGLegalize::ExpandDBG_STOPPOINT(SDNode* Node) {
return DAG.getNode(ISD::DEBUG_LOC, dl, MVT::Other, Node->getOperand(0),
DAG.getConstant(Line, MVT::i32),
DAG.getConstant(Col, MVT::i32),
- DAG.getSrcValue(CU.getGV()));
+ DAG.getSrcValue(CU_Node));
} else {
- unsigned ID = DW->RecordSourceLine(Line, Col, CU);
+ unsigned ID = DW->RecordSourceLine(Line, Col, CU_Node);
return DAG.getLabel(ISD::DBG_LABEL, dl, Node->getOperand(0), ID);
}
}
@@ -1643,7 +1625,7 @@ void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!");
DebugLoc dl = Node->getDebugLoc();
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
SDValue Tmp1 = SDValue(Node, 0);
SDValue Tmp2 = SDValue(Node, 1);
SDValue Tmp3 = Node->getOperand(2);
@@ -1676,14 +1658,14 @@ void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
 /// condition code CC on the current target. This routine assumes LHS and RHS
/// have already been legalized by LegalizeSetCCOperands. It expands SETCC with
/// illegal condition code into AND / OR of multiple SETCC values.
-void SelectionDAGLegalize::LegalizeSetCCCondCode(MVT VT,
+void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
SDValue &LHS, SDValue &RHS,
SDValue &CC,
DebugLoc dl) {
- MVT OpVT = LHS.getValueType();
+ EVT OpVT = LHS.getValueType();
ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
switch (TLI.getCondCodeAction(CCCode, OpVT)) {
- default: assert(0 && "Unknown condition code action!");
+ default: llvm_unreachable("Unknown condition code action!");
case TargetLowering::Legal:
// Nothing to do.
break;
@@ -1691,7 +1673,7 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(MVT VT,
ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
unsigned Opc = 0;
switch (CCCode) {
- default: assert(0 && "Don't know how to expand this condition!"); abort();
+ default: llvm_unreachable("Don't know how to expand this condition!");
case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break;
case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break;
case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break;
@@ -1722,13 +1704,13 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(MVT VT,
/// a load from the stack slot to DestVT, extending it if needed.
/// The resultant code need not be legal.
SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
- MVT SlotVT,
- MVT DestVT,
+ EVT SlotVT,
+ EVT DestVT,
DebugLoc dl) {
// Create the stack frame object.
unsigned SrcAlign =
TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType().
- getTypeForMVT());
+ getTypeForEVT(*DAG.getContext()));
SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
@@ -1739,7 +1721,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits();
unsigned DestAlign =
- TLI.getTargetData()->getPrefTypeAlignment(DestVT.getTypeForMVT());
+ TLI.getTargetData()->getPrefTypeAlignment(DestVT.getTypeForEVT(*DAG.getContext()));
// Emit a store to the stack slot. Use a truncstore if the input value is
// later than DestVT.
@@ -1787,9 +1769,9 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
unsigned NumElems = Node->getNumOperands();
SDValue Value1, Value2;
DebugLoc dl = Node->getDebugLoc();
- MVT VT = Node->getValueType(0);
- MVT OpVT = Node->getOperand(0).getValueType();
- MVT EltVT = VT.getVectorElementType();
+ EVT VT = Node->getValueType(0);
+ EVT OpVT = Node->getOperand(0).getValueType();
+ EVT EltVT = VT.getVectorElementType();
// If the only non-undef value is the low element, turn this into a
// SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
@@ -1833,7 +1815,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
} else {
assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
- const Type *OpNTy = OpVT.getTypeForMVT();
+ const Type *OpNTy = OpVT.getTypeForEVT(*DAG.getContext());
CV.push_back(UndefValue::get(OpNTy));
}
}
@@ -1886,8 +1868,8 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
- MVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForMVT();
+ EVT ArgVT = Node->getOperand(i).getValueType();
+ const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
@@ -1897,10 +1879,12 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
- const Type *RetTy = Node->getValueType(0).getTypeForMVT();
+ const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
- 0, CallingConv::C, false, Callee, Args, DAG,
+ 0, TLI.getLibcallCallingConv(LC), false,
+ /*isReturnValueUsed=*/true,
+ Callee, Args, DAG,
Node->getDebugLoc());
// Legalize the call sequence, starting with the chain. This will advance
@@ -1916,8 +1900,8 @@ SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_PPCF128) {
RTLIB::Libcall LC;
- switch (Node->getValueType(0).getSimpleVT()) {
- default: assert(0 && "Unexpected request for libcall!");
+ switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unexpected request for libcall!");
case MVT::f32: LC = Call_F32; break;
case MVT::f64: LC = Call_F64; break;
case MVT::f80: LC = Call_F80; break;
@@ -1932,8 +1916,8 @@ SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
RTLIB::Libcall Call_I64,
RTLIB::Libcall Call_I128) {
RTLIB::Libcall LC;
- switch (Node->getValueType(0).getSimpleVT()) {
- default: assert(0 && "Unexpected request for libcall!");
+ switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unexpected request for libcall!");
case MVT::i16: LC = Call_I16; break;
case MVT::i32: LC = Call_I32; break;
case MVT::i64: LC = Call_I64; break;
@@ -1948,7 +1932,7 @@ SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
/// legal for the target.
SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Op0,
- MVT DestVT,
+ EVT DestVT,
DebugLoc dl) {
if (Op0.getValueType() == MVT::i32) {
// simple 32-bit [signed|unsigned] integer to float/double expansion
@@ -2018,15 +2002,16 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
// as a negative number. To counteract this, the dynamic code adds an
// offset depending on the data type.
uint64_t FF;
- switch (Op0.getValueType().getSimpleVT()) {
- default: assert(0 && "Unsupported integer type!");
+ switch (Op0.getValueType().getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unsupported integer type!");
case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
}
if (TLI.isLittleEndian()) FF <<= 32;
- Constant *FudgeFactor = ConstantInt::get(Type::Int64Ty, FF);
+ Constant *FudgeFactor = ConstantInt::get(
+ Type::getInt64Ty(*DAG.getContext()), FF);
SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
@@ -2054,17 +2039,17 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
/// operation that takes a larger input.
SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
- MVT DestVT,
+ EVT DestVT,
bool isSigned,
DebugLoc dl) {
// First step, figure out the appropriate *INT_TO_FP operation to use.
- MVT NewInTy = LegalOp.getValueType();
+ EVT NewInTy = LegalOp.getValueType();
unsigned OpToUse = 0;
// Scan for the appropriate larger type to use.
while (1) {
- NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT()+1);
+ NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
assert(NewInTy.isInteger() && "Ran out of possibilities!");
// If the target supports SINT_TO_FP of this type, use it.
@@ -2096,17 +2081,17 @@ SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
/// operation that returns a larger result.
SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
- MVT DestVT,
+ EVT DestVT,
bool isSigned,
DebugLoc dl) {
// First step, figure out the appropriate FP_TO*INT operation to use.
- MVT NewOutTy = DestVT;
+ EVT NewOutTy = DestVT;
unsigned OpToUse = 0;
// Scan for the appropriate larger type to use.
while (1) {
- NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT()+1);
+ NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
assert(NewOutTy.isInteger() && "Ran out of possibilities!");
if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) {
@@ -2134,11 +2119,11 @@ SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation.
///
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
- MVT VT = Op.getValueType();
- MVT SHVT = TLI.getShiftAmountTy();
+ EVT VT = Op.getValueType();
+ EVT SHVT = TLI.getShiftAmountTy();
SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
- switch (VT.getSimpleVT()) {
- default: assert(0 && "Unhandled Expand type in BSWAP!"); abort();
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unhandled Expand type in BSWAP!");
case MVT::i16:
Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
@@ -2183,15 +2168,15 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
DebugLoc dl) {
switch (Opc) {
- default: assert(0 && "Cannot expand this yet!");
+ default: llvm_unreachable("Cannot expand this yet!");
case ISD::CTPOP: {
static const uint64_t mask[6] = {
0x5555555555555555ULL, 0x3333333333333333ULL,
0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL
};
- MVT VT = Op.getValueType();
- MVT ShVT = TLI.getShiftAmountTy();
+ EVT VT = Op.getValueType();
+ EVT ShVT = TLI.getShiftAmountTy();
unsigned len = VT.getSizeInBits();
for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
//x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8])
@@ -2217,8 +2202,8 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
// return popcount(~x);
//
// but see also: http://www.hackersdelight.org/HDcode/nlz.cc
- MVT VT = Op.getValueType();
- MVT ShVT = TLI.getShiftAmountTy();
+ EVT VT = Op.getValueType();
+ EVT ShVT = TLI.getShiftAmountTy();
unsigned len = VT.getSizeInBits();
for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
@@ -2233,7 +2218,7 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
// unless the target has ctlz but not ctpop, in which case we use:
// { return 32 - nlz(~x & (x-1)); }
// see also http://www.hackersdelight.org/HDcode/ntz.cc
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT,
DAG.getNOT(dl, Op, VT),
DAG.getNode(ISD::SUB, dl, VT, Op,
@@ -2272,7 +2257,6 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Results.push_back(DAG.getConstant(1, Node->getValueType(0)));
break;
case ISD::EH_RETURN:
- case ISD::DECLARE:
case ISD::DBG_LABEL:
case ISD::EH_LABEL:
case ISD::PREFETCH:
@@ -2291,21 +2275,22 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Results.push_back(Node->getOperand(i));
break;
case ISD::UNDEF: {
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
if (VT.isInteger())
Results.push_back(DAG.getConstant(0, VT));
else if (VT.isFloatingPoint())
Results.push_back(DAG.getConstantFP(0, VT));
else
- assert(0 && "Unknown value type!");
+ llvm_unreachable("Unknown value type!");
break;
}
case ISD::TRAP: {
// If this operation is not supported, lower it to 'abort()' call
TargetLowering::ArgListTy Args;
std::pair<SDValue, SDValue> CallResult =
- TLI.LowerCallTo(Node->getOperand(0), Type::VoidTy,
+ TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
false, false, false, false, 0, CallingConv::C, false,
+ /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("abort", TLI.getPointerTy()),
Args, DAG, dl);
Results.push_back(CallResult.second);
@@ -2326,7 +2311,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::SIGN_EXTEND_INREG: {
// NOTE: we could fall back on load/store here too for targets without
// SAR. However, it is doubtful that any exist.
- MVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
+ EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
unsigned BitsDiff = Node->getValueType(0).getSizeInBits() -
ExtraVT.getSizeInBits();
SDValue ShiftCst = DAG.getConstant(BitsDiff, TLI.getShiftAmountTy());
@@ -2343,7 +2328,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
// NOTE: there is a choice here between constantly creating new stack
// slots and always reusing the same one. We currently always create
// new ones, as reuse may inhibit scheduling.
- MVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
+ EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
Node->getValueType(0), dl);
Results.push_back(Tmp1);
@@ -2357,8 +2342,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break;
case ISD::FP_TO_UINT: {
SDValue True, False;
- MVT VT = Node->getOperand(0).getValueType();
- MVT NVT = Node->getValueType(0);
+ EVT VT = Node->getOperand(0).getValueType();
+ EVT NVT = Node->getValueType(0);
const uint64_t zero[] = {0, 0};
APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero));
APInt x = APInt::getSignBit(NVT.getSizeInBits());
@@ -2379,14 +2364,14 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
}
case ISD::VAARG: {
const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
SDValue VAList = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0);
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
DAG.getConstant(TLI.getTargetData()->
- getTypeAllocSize(VT.getTypeForMVT()),
+ getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer
Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0);
@@ -2434,8 +2419,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
SmallVector<int, 8> Mask;
cast<ShuffleVectorSDNode>(Node)->getMask(Mask);
- MVT VT = Node->getValueType(0);
- MVT EltVT = VT.getVectorElementType();
+ EVT VT = Node->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
unsigned NumElems = VT.getVectorNumElements();
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i != NumElems; ++i) {
@@ -2458,7 +2443,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break;
}
case ISD::EXTRACT_ELEMENT: {
- MVT OpTy = Node->getOperand(0).getValueType();
+ EVT OpTy = Node->getOperand(0).getValueType();
if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
// 1 -> Hi
Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
@@ -2507,7 +2492,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break;
case ISD::FABS: {
// Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
Tmp1 = Node->getOperand(0);
Tmp2 = DAG.getConstantFP(0.0, VT);
Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
@@ -2622,7 +2607,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break;
}
case ISD::SUB: {
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
"Don't know how to expand this subtraction!");
@@ -2634,7 +2619,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
}
case ISD::UREM:
case ISD::SREM: {
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
SDVTList VTs = DAG.getVTList(VT, VT);
bool isSigned = Node->getOpcode() == ISD::SREM;
unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
@@ -2662,7 +2647,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::SDIV: {
bool isSigned = Node->getOpcode() == ISD::SDIV;
unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
SDVTList VTs = DAG.getVTList(VT, VT);
if (TLI.isOperationLegalOrCustom(DivRemOpc, VT))
Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
@@ -2680,7 +2665,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::MULHS: {
unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
ISD::SMUL_LOHI;
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
SDVTList VTs = DAG.getVTList(VT, VT);
assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
"If this wasn't legal, it shouldn't have been created!");
@@ -2690,7 +2675,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break;
}
case ISD::MUL: {
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
SDVTList VTs = DAG.getVTList(VT, VT);
// See if multiply or divide can be lowered using two-result operations.
// We just need the low half of the multiply; try both the signed
@@ -2729,7 +2714,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
LHS, RHS);
Results.push_back(Sum);
- MVT OType = Node->getValueType(1);
+ EVT OType = Node->getValueType(1);
SDValue Zero = DAG.getConstant(0, LHS.getValueType());
@@ -2770,7 +2755,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
}
case ISD::UMULO:
case ISD::SMULO: {
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
SDValue LHS = Node->getOperand(0);
SDValue RHS = Node->getOperand(1);
SDValue BottomHalf;
@@ -2786,8 +2771,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
RHS);
TopHalf = BottomHalf.getValue(1);
- } else if (TLI.isTypeLegal(MVT::getIntegerVT(VT.getSizeInBits() * 2))) {
- MVT WideVT = MVT::getIntegerVT(VT.getSizeInBits() * 2);
+ } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2))) {
+ EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
@@ -2800,7 +2785,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
    // type in some cases.
// Also, we can fall back to a division in some cases, but that's a big
// performance hit in the general case.
- assert(0 && "Don't know how to expand this operation yet!");
+ llvm_unreachable("Don't know how to expand this operation yet!");
}
if (isSigned) {
Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy());
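If the target has neither the multiply-with-overflow node nor the *MUL_LOHI pair, the code above falls back to multiplying in an integer type of twice the width: the product's top half is the overflow indicator (for the signed case it is compared against the bottom half's replicated sign bit, which is what the getShiftAmountTy constant just above sets up). A sketch for 32-bit UMULO, assuming 64-bit arithmetic is legal:

    #include <cstdint>

    // umulo.i32 via a widening multiply; overflow iff the high half is non-zero.
    // For smulo the test would be: high half != ((int32_t)Lo >> 31).
    static bool UMulO32(uint32_t A, uint32_t B, uint32_t &Lo) {
      uint64_t Wide = (uint64_t)A * (uint64_t)B;
      Lo = (uint32_t)Wide;
      return (uint32_t)(Wide >> 32) != 0;
    }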
@@ -2816,7 +2801,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break;
}
case ISD::BUILD_PAIR: {
- MVT PairTy = Node->getValueType(0);
+ EVT PairTy = Node->getValueType(0);
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
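BUILD_PAIR glues two half-width values into one: zero-extend the low half, any-extend the high half, shift it into position and OR the two together. For an i64 built from two i32 halves the sketch is simply:

    #include <cstdint>

    // i64 = BUILD_PAIR(Lo:i32, Hi:i32) as ZERO_EXTEND / SHL / OR.
    static uint64_t BuildPair(uint32_t Lo, uint32_t Hi) {
      return (uint64_t)Lo | ((uint64_t)Hi << 32);
    }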
@@ -2845,14 +2830,14 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
SDValue Table = Node->getOperand(1);
SDValue Index = Node->getOperand(2);
- MVT PTy = TLI.getPointerTy();
+ EVT PTy = TLI.getPointerTy();
MachineFunction &MF = DAG.getMachineFunction();
unsigned EntrySize = MF.getJumpTableInfo()->getEntrySize();
Index= DAG.getNode(ISD::MUL, dl, PTy,
Index, DAG.getConstant(EntrySize, PTy));
SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
- MVT MemVT = MVT::getIntegerVT(EntrySize * 8);
+ EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
PseudoSourceValue::getJumpTable(), 0, MemVT);
Addr = LD;
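For BR_JT the entry address is computed as Table + Index * EntrySize and the entry itself is read with a sign-extending load of EntrySize*8 bits; the remainder of the hunk (not shown here) turns the loaded value into an indirect branch. As rough pointer arithmetic, assuming 4-byte absolute entries:

    #include <cstdint>

    // Illustrative only: fetch the branch target out of a jump table.
    static const void *JumpTableTarget(const int32_t *Table, uintptr_t Index) {
      intptr_t Entry = (intptr_t)Table[Index];  // SEXTLOAD of the i32 entry
      return (const void *)Entry;               // fed to the indirect branch afterwards
    }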
@@ -2899,7 +2884,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
// Otherwise, SETCC for the given comparison type must be completely
// illegal; expand it into a SELECT_CC.
- MVT VT = Node->getValueType(0);
+ EVT VT = Node->getValueType(0);
Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3);
Results.push_back(Tmp1);
@@ -2958,12 +2943,13 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
}
void SelectionDAGLegalize::PromoteNode(SDNode *Node,
SmallVectorImpl<SDValue> &Results) {
- MVT OVT = Node->getValueType(0);
+ EVT OVT = Node->getValueType(0);
if (Node->getOpcode() == ISD::UINT_TO_FP ||
- Node->getOpcode() == ISD::SINT_TO_FP) {
+ Node->getOpcode() == ISD::SINT_TO_FP ||
+ Node->getOpcode() == ISD::SETCC) {
OVT = Node->getOperand(0).getValueType();
}
- MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
+ EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
DebugLoc dl = Node->getDebugLoc();
SDValue Tmp1, Tmp2, Tmp3;
switch (Node->getOpcode()) {
@@ -2973,10 +2959,10 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
// Zero extend the argument.
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
// Perform the larger operation.
- Tmp1 = DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0), Tmp1);
+ Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
if (Node->getOpcode() == ISD::CTTZ) {
//if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
- Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
+ Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
ISD::SETEQ);
Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
@@ -2987,7 +2973,7 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
DAG.getConstant(NVT.getSizeInBits() -
OVT.getSizeInBits(), NVT));
}
- Results.push_back(Tmp1);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
break;
case ISD::BSWAP: {
unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
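The count operations are promoted by zero-extending the operand, counting in NVT and then compensating: CTTZ maps a count of NVT-bits (nothing set) back to the original width, CTLZ subtracts the extra leading zeros introduced by the extension, and the new Results.push_back line truncates the promoted result back to OVT. The same arithmetic for an i8 count done in i32, assuming the GCC/Clang builtins:

    #include <cstdint>

    // cttz.i8 via cttz.i32: a count of 32 ("no bit found") becomes 8.
    static unsigned CtTZ8Via32(uint8_t X) {
      unsigned C = X ? (unsigned)__builtin_ctz(X) : 32;
      return C == 32 ? 8u : C;
    }

    // ctlz.i8 via ctlz.i32: subtract the 32 - 8 = 24 extra leading zeros.
    static unsigned CtLZ8Via32(uint8_t X) {
      unsigned C = X ? (unsigned)__builtin_clz(X) : 32;
      return C - 24;
    }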
@@ -3012,16 +2998,26 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
break;
case ISD::AND:
case ISD::OR:
- case ISD::XOR:
- assert(OVT.isVector() && "Don't know how to promote scalar logic ops");
- // Bit convert each of the values to the new type.
- Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(0));
- Tmp2 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(1));
+ case ISD::XOR: {
+ unsigned ExtOp, TruncOp;
+ if (OVT.isVector()) {
+ ExtOp = ISD::BIT_CONVERT;
+ TruncOp = ISD::BIT_CONVERT;
+ } else if (OVT.isInteger()) {
+ ExtOp = ISD::ANY_EXTEND;
+ TruncOp = ISD::TRUNCATE;
+ } else {
+ llvm_report_error("Cannot promote logic operation");
+ }
+ // Promote each of the values to the new type.
+ Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
+ Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
+ // Perform the larger operation, then convert back
Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
- // Bit convert the result back the original type.
- Results.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, OVT, Tmp1));
+ Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
break;
- case ISD::SELECT:
+ }
+ case ISD::SELECT: {
unsigned ExtOp, TruncOp;
if (Node->getValueType(0).isVector()) {
ExtOp = ISD::BIT_CONVERT;
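Both the rewritten AND/OR/XOR case and the SELECT case that follows use the same promotion recipe: widen the operands (BIT_CONVERT for vectors, ANY_EXTEND for scalars), perform the operation in NVT, and narrow the result again. ANY_EXTEND is safe for the bitwise ops because the low OVT bits of the wide result depend only on the low OVT bits of the inputs, so whatever lands in the high bits is discarded by the TRUNCATE. A small self-check of that claim in plain C++:

    #include <cassert>
    #include <cstdint>

    // AND on i8 performed in i32: extend, operate, truncate.
    static uint8_t And8Via32(uint8_t A, uint8_t B) {
      uint32_t Wide = (uint32_t)A & (uint32_t)B;  // high bits are don't-care
      return (uint8_t)Wide;                       // TRUNCATE back to i8
    }

    int main() {
      for (unsigned A = 0; A < 256; ++A)
        for (unsigned B = 0; B < 256; ++B)
          assert(And8Via32((uint8_t)A, (uint8_t)B) == (uint8_t)(A & B));
      return 0;
    }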
@@ -3046,6 +3042,7 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
DAG.getIntPtrConstant(0));
Results.push_back(Tmp1);
break;
+ }
case ISD::VECTOR_SHUFFLE: {
SmallVector<int, 8> Mask;
cast<ShuffleVectorSDNode>(Node)->getMask(Mask);
@@ -3061,31 +3058,14 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
break;
}
case ISD::SETCC: {
- // First step, figure out the appropriate operation to use.
- // Allow SETCC to not be supported for all legal data types
- // Mostly this targets FP
- MVT NewInTy = Node->getOperand(0).getValueType();
- MVT OldVT = NewInTy; OldVT = OldVT;
-
- // Scan for the appropriate larger type to use.
- while (1) {
- NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT()+1);
-
- assert(NewInTy.isInteger() == OldVT.isInteger() &&
- "Fell off of the edge of the integer world");
- assert(NewInTy.isFloatingPoint() == OldVT.isFloatingPoint() &&
- "Fell off of the edge of the floating point world");
-
- // If the target supports SETCC of this type, use it.
- if (TLI.isOperationLegalOrCustom(ISD::SETCC, NewInTy))
- break;
- }
- if (NewInTy.isInteger())
- assert(0 && "Cannot promote Legal Integer SETCC yet");
- else {
- Tmp1 = DAG.getNode(ISD::FP_EXTEND, dl, NewInTy, Tmp1);
- Tmp2 = DAG.getNode(ISD::FP_EXTEND, dl, NewInTy, Tmp2);
+ unsigned ExtOp = ISD::FP_EXTEND;
+ if (NVT.isInteger()) {
+ ISD::CondCode CCCode =
+ cast<CondCodeSDNode>(Node->getOperand(2))->get();
+ ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
}
+ Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
+ Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
Tmp1, Tmp2, Node->getOperand(2)));
break;
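The new SETCC case is why OVT is now taken from operand 0 at the top of PromoteNode: only the operands are widened (the boolean result type stays as it was), and the extension must match the signedness of the condition code, since sign- and zero-extension disagree on values with the top bit set. A hedged illustration for i8 compares promoted to i32:

    #include <cstdint>

    // Signed predicates need SIGN_EXTEND, unsigned ones ZERO_EXTEND;
    // e.g. 0x80 is -128 under the first view and 128 under the second.
    static bool SLT8Via32(int8_t A, int8_t B)   { return (int32_t)A < (int32_t)B; }
    static bool ULT8Via32(uint8_t A, uint8_t B) { return (uint32_t)A < (uint32_t)B; }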
diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index c3c1bea..84e39b4 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -20,10 +20,12 @@
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
/// GetFPLibCall - Return the right libcall for the given floating point type.
-static RTLIB::Libcall GetFPLibCall(MVT VT,
+static RTLIB::Libcall GetFPLibCall(EVT VT,
RTLIB::Libcall Call_F32,
RTLIB::Libcall Call_F64,
RTLIB::Libcall Call_F80,
@@ -41,18 +43,17 @@ static RTLIB::Libcall GetFPLibCall(MVT VT,
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
- DEBUG(cerr << "Soften float result " << ResNo << ": "; N->dump(&DAG);
- cerr << "\n");
+ DEBUG(errs() << "Soften float result " << ResNo << ": "; N->dump(&DAG);
+ errs() << "\n");
SDValue R = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "SoftenFloatResult #" << ResNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "SoftenFloatResult #" << ResNo << ": ";
+ N->dump(&DAG); errs() << "\n";
#endif
- assert(0 && "Do not know how to soften the result of this operator!");
- abort();
+ llvm_unreachable("Do not know how to soften the result of this operator!");
case ISD::BIT_CONVERT: R = SoftenFloatRes_BIT_CONVERT(N); break;
case ISD::BUILD_PAIR: R = SoftenFloatRes_BUILD_PAIR(N); break;
@@ -107,14 +108,14 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_BIT_CONVERT(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_BUILD_PAIR(SDNode *N) {
// Convert the inputs to integers, and build a new pair out of them.
return DAG.getNode(ISD::BUILD_PAIR, N->getDebugLoc(),
- TLI.getTypeToTransformTo(N->getValueType(0)),
+ TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
BitConvertToInteger(N->getOperand(0)),
BitConvertToInteger(N->getOperand(1)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(ConstantFPSDNode *N) {
return DAG.getConstant(N->getValueAPF().bitcastToAPInt(),
- TLI.getTypeToTransformTo(N->getValueType(0)));
+ TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
@@ -125,7 +126,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FABS(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned Size = NVT.getSizeInBits();
// Mask = ~(1 << (Size-1))
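Once the float has been softened to its integer representation, fabs is simply clearing the sign bit, which is what the Mask = ~(1 << (Size-1)) comment describes. For a softened f32 the equivalent bit manipulation is (a sketch, not the patch's APInt code):

    #include <cstdint>

    // Soft-float fabs on the i32 bit pattern of an f32.
    static uint32_t SoftFAbs32(uint32_t Bits) {
      const uint32_t Mask = ~(UINT32_C(1) << 31);  // ~(1 << (Size-1))
      return Bits & Mask;
    }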
@@ -136,7 +137,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FABS(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FADD(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return MakeLibCall(GetFPLibCall(N->getValueType(0),
@@ -148,7 +149,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FADD(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FCEIL(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::CEIL_F32,
@@ -163,8 +164,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) {
SDValue RHS = BitConvertToInteger(N->getOperand(1));
DebugLoc dl = N->getDebugLoc();
- MVT LVT = LHS.getValueType();
- MVT RVT = RHS.getValueType();
+ EVT LVT = LHS.getValueType();
+ EVT RVT = RHS.getValueType();
unsigned LSize = LVT.getSizeInBits();
unsigned RSize = RVT.getSizeInBits();
@@ -199,7 +200,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FCOS(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::COS_F32,
@@ -210,7 +211,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FCOS(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FDIV(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return MakeLibCall(GetFPLibCall(N->getValueType(0),
@@ -222,7 +223,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FDIV(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FEXP(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::EXP_F32,
@@ -233,7 +234,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FEXP(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FEXP2(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::EXP2_F32,
@@ -244,7 +245,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FEXP2(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FFLOOR(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::FLOOR_F32,
@@ -255,7 +256,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FFLOOR(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::LOG_F32,
@@ -266,7 +267,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG2(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::LOG2_F32,
@@ -277,7 +278,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG2(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG10(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::LOG10_F32,
@@ -288,7 +289,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FLOG10(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FMUL(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return MakeLibCall(GetFPLibCall(N->getValueType(0),
@@ -300,7 +301,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FMUL(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FNEARBYINT(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::NEARBYINT_F32,
@@ -311,7 +312,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FNEARBYINT(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FNEG(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
// Expand Y = FNEG(X) -> Y = SUB -0.0, X
SDValue Ops[2] = { DAG.getConstantFP(-0.0, N->getValueType(0)),
GetSoftenedFloat(N->getOperand(0)) };
@@ -324,7 +325,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FNEG(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = N->getOperand(0);
RTLIB::Libcall LC = RTLIB::getFPEXT(Op.getValueType(), N->getValueType(0));
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_EXTEND!");
@@ -332,7 +333,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FP_ROUND(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = N->getOperand(0);
RTLIB::Libcall LC = RTLIB::getFPROUND(Op.getValueType(), N->getValueType(0));
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_ROUND!");
@@ -340,7 +341,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FP_ROUND(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FPOW(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return MakeLibCall(GetFPLibCall(N->getValueType(0),
@@ -354,7 +355,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FPOW(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) {
assert(N->getOperand(1).getValueType() == MVT::i32 &&
"Unsupported power type!");
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), N->getOperand(1) };
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::POWI_F32,
@@ -365,7 +366,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FREM(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return MakeLibCall(GetFPLibCall(N->getValueType(0),
@@ -377,7 +378,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FREM(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FRINT(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::RINT_F32,
@@ -388,7 +389,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FRINT(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FSIN(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::SIN_F32,
@@ -399,7 +400,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FSIN(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FSQRT(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::SQRT_F32,
@@ -410,7 +411,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FSQRT(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FSUB(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
GetSoftenedFloat(N->getOperand(1)) };
return MakeLibCall(GetFPLibCall(N->getValueType(0),
@@ -422,7 +423,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FSUB(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FTRUNC(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = GetSoftenedFloat(N->getOperand(0));
return MakeLibCall(GetFPLibCall(N->getValueType(0),
RTLIB::TRUNC_F32,
@@ -434,8 +435,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FTRUNC(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
LoadSDNode *L = cast<LoadSDNode>(N);
- MVT VT = N->getValueType(0);
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT VT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
DebugLoc dl = N->getDebugLoc();
SDValue NewL;
@@ -479,19 +480,19 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_SELECT_CC(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_UNDEF(SDNode *N) {
- return DAG.getUNDEF(TLI.getTypeToTransformTo(N->getValueType(0)));
+ return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_VAARG(SDNode *N) {
SDValue Chain = N->getOperand(0); // Get the chain.
SDValue Ptr = N->getOperand(1); // Get the pointer.
- MVT VT = N->getValueType(0);
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT VT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
DebugLoc dl = N->getDebugLoc();
SDValue NewVAARG;
NewVAARG = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2));
-
+
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), NewVAARG.getValue(1));
@@ -500,9 +501,9 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_VAARG(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
- MVT SVT = N->getOperand(0).getValueType();
- MVT RVT = N->getValueType(0);
- MVT NVT = MVT();
+ EVT SVT = N->getOperand(0).getValueType();
+ EVT RVT = N->getValueType(0);
+ EVT NVT = EVT();
DebugLoc dl = N->getDebugLoc();
// If the input is not legal, eg: i1 -> fp, then it needs to be promoted to
@@ -521,7 +522,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
// Sign/zero extend the argument if the libcall takes a larger type.
SDValue Op = DAG.getNode(Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
NVT, N->getOperand(0));
- return MakeLibCall(LC, TLI.getTypeToTransformTo(RVT), &Op, 1, false, dl);
+ return MakeLibCall(LC, TLI.getTypeToTransformTo(*DAG.getContext(), RVT), &Op, 1, false, dl);
}
@@ -530,18 +531,17 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
- DEBUG(cerr << "Soften float operand " << OpNo << ": "; N->dump(&DAG);
- cerr << "\n");
+ DEBUG(errs() << "Soften float operand " << OpNo << ": "; N->dump(&DAG);
+ errs() << "\n");
SDValue Res = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "SoftenFloatOperand Op #" << OpNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "SoftenFloatOperand Op #" << OpNo << ": ";
+ N->dump(&DAG); errs() << "\n";
#endif
- assert(0 && "Do not know how to soften this operator's operand!");
- abort();
+ llvm_unreachable("Do not know how to soften this operator's operand!");
case ISD::BIT_CONVERT: Res = SoftenFloatOp_BIT_CONVERT(N); break;
case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break;
@@ -574,7 +574,7 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode, DebugLoc dl) {
SDValue LHSInt = GetSoftenedFloat(NewLHS);
SDValue RHSInt = GetSoftenedFloat(NewRHS);
- MVT VT = NewLHS.getValueType();
+ EVT VT = NewLHS.getValueType();
assert((VT == MVT::f32 || VT == MVT::f64) && "Unsupported setcc type!");
@@ -637,7 +637,7 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
}
}
- MVT RetVT = MVT::i32; // FIXME: is this the correct return type?
+ EVT RetVT = MVT::i32; // FIXME: is this the correct return type?
SDValue Ops[2] = { LHSInt, RHSInt };
NewLHS = MakeLibCall(LC1, RetVT, Ops, 2, false/*sign irrelevant*/, dl);
NewRHS = DAG.getConstant(0, RetVT);
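SoftenSetCCOperands turns a floating-point comparison into an integer comparison of a libcall result against zero: the softened operands become the call arguments, NewLHS the i32 return value and NewRHS the constant 0, with the condition code remapped (in the switch these hunks skip over) to whatever predicate the chosen libcall's result should be tested with. Roughly, assuming a libgcc-style helper such as __ltdf2:

    // Provided by the soft-float runtime (libgcc / compiler-rt): returns a
    // value less than zero iff A < B and neither operand is NaN.
    extern "C" int __ltdf2(double A, double B);

    static bool SoftenedOLT(double A, double B) {
      // setcc olt A, B  becomes  setcc lt (__ltdf2(A, B)), 0
      return __ltdf2(A, B) < 0;
    }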
@@ -659,8 +659,8 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BIT_CONVERT(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_ROUND(SDNode *N) {
- MVT SVT = N->getOperand(0).getValueType();
- MVT RVT = N->getValueType(0);
+ EVT SVT = N->getOperand(0).getValueType();
+ EVT RVT = N->getValueType(0);
RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, RVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_ROUND libcall");
@@ -688,7 +688,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_SINT(SDNode *N) {
- MVT RVT = N->getValueType(0);
+ EVT RVT = N->getValueType(0);
RTLIB::Libcall LC = RTLIB::getFPTOSINT(N->getOperand(0).getValueType(), RVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_SINT!");
SDValue Op = GetSoftenedFloat(N->getOperand(0));
@@ -696,7 +696,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_SINT(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_UINT(SDNode *N) {
- MVT RVT = N->getValueType(0);
+ EVT RVT = N->getValueType(0);
RTLIB::Libcall LC = RTLIB::getFPTOUINT(N->getOperand(0).getValueType(), RVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_UINT!");
SDValue Op = GetSoftenedFloat(N->getOperand(0));
@@ -767,7 +767,7 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) {
/// have invalid operands or may have other results that need promotion, we just
/// know that (at least) one result needs expansion.
void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
- DEBUG(cerr << "Expand float result: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Expand float result: "; N->dump(&DAG); errs() << "\n");
SDValue Lo, Hi;
Lo = Hi = SDValue();
@@ -778,11 +778,10 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "ExpandFloatResult #" << ResNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "ExpandFloatResult #" << ResNo << ": ";
+ N->dump(&DAG); errs() << "\n";
#endif
- assert(0 && "Do not know how to expand the result of this operator!");
- abort();
+ llvm_unreachable("Do not know how to expand the result of this operator!");
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
@@ -830,7 +829,7 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
assert(NVT.getSizeInBits() == integerPartWidth &&
"Do not know how to expand this float constant!");
APInt C = cast<ConstantFPSDNode>(N)->getValueAPF().bitcastToAPInt();
@@ -982,7 +981,7 @@ void DAGTypeLegalizer::ExpandFloatRes_FNEG(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::ExpandFloatRes_FP_EXTEND(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
Hi = DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), NVT, N->getOperand(0));
Lo = DAG.getConstantFP(APFloat(APInt(NVT.getSizeInBits(), 0)), NVT);
}
@@ -1067,7 +1066,7 @@ void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo,
SDValue Ptr = LD->getBasePtr();
DebugLoc dl = N->getDebugLoc();
- MVT NVT = TLI.getTypeToTransformTo(LD->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
assert(NVT.isByteSized() && "Expanded type not byte sized!");
assert(LD->getMemoryVT().bitsLE(NVT) && "Float type not round?");
@@ -1090,10 +1089,10 @@ void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
SDValue &Hi) {
assert(N->getValueType(0) == MVT::ppcf128 && "Unsupported XINT_TO_FP!");
- MVT VT = N->getValueType(0);
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT VT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Src = N->getOperand(0);
- MVT SrcVT = Src.getValueType();
+ EVT SrcVT = Src.getValueType();
bool isSigned = N->getOpcode() == ISD::SINT_TO_FP;
DebugLoc dl = N->getDebugLoc();
@@ -1135,7 +1134,7 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
static const uint64_t TwoE128[] = { 0x47f0000000000000LL, 0 };
const uint64_t *Parts = 0;
- switch (SrcVT.getSimpleVT()) {
+ switch (SrcVT.getSimpleVT().SimpleTy) {
default:
assert(false && "Unsupported UINT_TO_FP!");
case MVT::i32:
@@ -1167,7 +1166,7 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
/// types of the node are known to be legal, but other operands of the node may
/// need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
- DEBUG(cerr << "Expand float operand: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Expand float operand: "; N->dump(&DAG); errs() << "\n");
SDValue Res = SDValue();
if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType())
@@ -1178,11 +1177,10 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "ExpandFloatOperand Op #" << OpNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "ExpandFloatOperand Op #" << OpNo << ": ";
+ N->dump(&DAG); errs() << "\n";
#endif
- assert(0 && "Do not know how to expand this operator's operand!");
- abort();
+ llvm_unreachable("Do not know how to expand this operator's operand!");
case ISD::BIT_CONVERT: Res = ExpandOp_BIT_CONVERT(N); break;
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
@@ -1224,7 +1222,7 @@ void DAGTypeLegalizer::FloatExpandSetCCOperands(SDValue &NewLHS,
GetExpandedFloat(NewLHS, LHSLo, LHSHi);
GetExpandedFloat(NewRHS, RHSLo, RHSHi);
- MVT VT = NewLHS.getValueType();
+ EVT VT = NewLHS.getValueType();
assert(VT == MVT::ppcf128 && "Unsupported setcc type!");
// FIXME: This generated code sucks. We want to generate
@@ -1276,7 +1274,7 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) {
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_SINT(SDNode *N) {
- MVT RVT = N->getValueType(0);
+ EVT RVT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
// Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
@@ -1297,7 +1295,7 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_SINT(SDNode *N) {
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_UINT(SDNode *N) {
- MVT RVT = N->getValueType(0);
+ EVT RVT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
// Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
@@ -1374,7 +1372,7 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
- MVT NVT = TLI.getTypeToTransformTo(ST->getValue().getValueType());
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ST->getValue().getValueType());
assert(NVT.isByteSized() && "Expanded type not byte sized!");
assert(ST->getMemoryVT().bitsLE(NVT) && "Float type not round?");
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 0c826f6..8ac8063 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -20,6 +20,8 @@
#include "LegalizeTypes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -31,7 +33,7 @@ using namespace llvm;
/// may also have invalid operands or may have other results that need
/// expansion, we just know that (at least) one result needs promotion.
void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
- DEBUG(cerr << "Promote integer result: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Promote integer result: "; N->dump(&DAG); errs() << "\n");
SDValue Res = SDValue();
// See if the target wants to custom expand this node.
@@ -41,11 +43,10 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "PromoteIntegerResult #" << ResNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "PromoteIntegerResult #" << ResNo << ": ";
+ N->dump(&DAG); errs() << "\n";
#endif
- assert(0 && "Do not know how to promote this operator!");
- abort();
+ llvm_unreachable("Do not know how to promote this operator!");
case ISD::AssertSext: Res = PromoteIntRes_AssertSext(N); break;
case ISD::AssertZext: Res = PromoteIntRes_AssertZext(N); break;
case ISD::BIT_CONVERT: Res = PromoteIntRes_BIT_CONVERT(N); break;
@@ -161,10 +162,10 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic2(AtomicSDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
SDValue InOp = N->getOperand(0);
- MVT InVT = InOp.getValueType();
- MVT NInVT = TLI.getTypeToTransformTo(InVT);
- MVT OutVT = N->getValueType(0);
- MVT NOutVT = TLI.getTypeToTransformTo(OutVT);
+ EVT InVT = InOp.getValueType();
+ EVT NInVT = TLI.getTypeToTransformTo(*DAG.getContext(), InVT);
+ EVT OutVT = N->getValueType(0);
+ EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
DebugLoc dl = N->getDebugLoc();
switch (getTypeAction(InVT)) {
@@ -201,7 +202,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
std::swap(Lo, Hi);
InOp = DAG.getNode(ISD::ANY_EXTEND, dl,
- MVT::getIntegerVT(NOutVT.getSizeInBits()),
+ EVT::getIntegerVT(*DAG.getContext(), NOutVT.getSizeInBits()),
JoinIntegers(Lo, Hi));
return DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, InOp);
}
@@ -211,24 +212,14 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
return DAG.getNode(ISD::BIT_CONVERT, dl, OutVT, GetWidenedVector(InOp));
}
- // Otherwise, lower the bit-convert to a store/load from the stack.
- // Create the stack frame object. Make sure it is aligned for both
- // the source and destination types.
- SDValue FIPtr = DAG.CreateStackTemporary(InVT, OutVT);
- int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
- const Value *SV = PseudoSourceValue::getFixedStack(FI);
-
- // Emit a store to the stack slot.
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, InOp, FIPtr, SV, 0);
-
- // Result is an extending load from the stack slot.
- return DAG.getExtLoad(ISD::EXTLOAD, dl, NOutVT, Store, FIPtr, SV, 0, OutVT);
+ return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
+ CreateStackStoreLoad(InOp, OutVT));
}
SDValue DAGTypeLegalizer::PromoteIntRes_BSWAP(SDNode *N) {
SDValue Op = GetPromotedInteger(N->getOperand(0));
- MVT OVT = N->getValueType(0);
- MVT NVT = Op.getValueType();
+ EVT OVT = N->getValueType(0);
+ EVT NVT = Op.getValueType();
DebugLoc dl = N->getDebugLoc();
unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
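PromoteIntRes_BSWAP (the tail of this hunk) byte-swaps in the promoted type and then shifts right by DiffBits so the meaningful bytes land back at the bottom; the high bits of the promoted result are don't-care. The same trick for an i16 swap done in i32, assuming the Clang/GCC builtin (the extension is shown as a zero-extension for clarity):

    #include <cstdint>

    // bswap.i16 via bswap.i32, then shift down by DiffBits = 32 - 16.
    static uint16_t BSwap16Via32(uint16_t X) {
      return (uint16_t)(__builtin_bswap32((uint32_t)X) >> 16);
    }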
@@ -240,18 +231,18 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) {
// The pair element type may be legal, or may not promote to the same type as
// the result, for example i14 = BUILD_PAIR (i7, i7). Handle all cases.
return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(),
- TLI.getTypeToTransformTo(N->getValueType(0)),
+ TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
JoinIntegers(N->getOperand(0), N->getOperand(1)));
}
SDValue DAGTypeLegalizer::PromoteIntRes_Constant(SDNode *N) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
// FIXME there is no actual debug info here
DebugLoc dl = N->getDebugLoc();
// Zero extend things like i1, sign extend everything else. It shouldn't
// matter in theory which one we pick, but this tends to give better code?
unsigned Opc = VT.isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- SDValue Result = DAG.getNode(Opc, dl, TLI.getTypeToTransformTo(VT),
+ SDValue Result = DAG.getNode(Opc, dl, TLI.getTypeToTransformTo(*DAG.getContext(), VT),
SDValue(N, 0));
assert(isa<ConstantSDNode>(Result) && "Didn't constant fold ext?");
return Result;
@@ -263,7 +254,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CONVERT_RNDSAT(SDNode *N) {
CvtCode == ISD::CVT_US || CvtCode == ISD::CVT_UU ||
CvtCode == ISD::CVT_SF || CvtCode == ISD::CVT_UF) &&
"can only promote integers");
- MVT OutVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getConvertRndSat(OutVT, N->getDebugLoc(), N->getOperand(0),
N->getOperand(1), N->getOperand(2),
N->getOperand(3), N->getOperand(4), CvtCode);
@@ -273,8 +264,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) {
// Zero extend to the promoted type and do the count there.
SDValue Op = ZExtPromotedInteger(N->getOperand(0));
DebugLoc dl = N->getDebugLoc();
- MVT OVT = N->getValueType(0);
- MVT NVT = Op.getValueType();
+ EVT OVT = N->getValueType(0);
+ EVT NVT = Op.getValueType();
Op = DAG.getNode(ISD::CTLZ, dl, NVT, Op);
// Subtract off the extra leading bits in the bigger type.
return DAG.getNode(ISD::SUB, dl, NVT, Op,
@@ -290,8 +281,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CTPOP(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) {
SDValue Op = GetPromotedInteger(N->getOperand(0));
- MVT OVT = N->getValueType(0);
- MVT NVT = Op.getValueType();
+ EVT OVT = N->getValueType(0);
+ EVT NVT = Op.getValueType();
DebugLoc dl = N->getDebugLoc();
// The count is the same in the promoted type except if the original
// value was zero. This can be handled by setting the bit just off
@@ -303,63 +294,21 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N) {
- MVT OldVT = N->getValueType(0);
- SDValue OldVec = N->getOperand(0);
- if (getTypeAction(OldVec.getValueType()) == WidenVector)
- OldVec = GetWidenedVector(N->getOperand(0));
- unsigned OldElts = OldVec.getValueType().getVectorNumElements();
DebugLoc dl = N->getDebugLoc();
-
- if (OldElts == 1) {
- assert(!isTypeLegal(OldVec.getValueType()) &&
- "Legal one-element vector of a type needing promotion!");
- // It is tempting to follow GetScalarizedVector by a call to
- // GetPromotedInteger, but this would be wrong because the
- // scalarized value may not yet have been processed.
- return DAG.getNode(ISD::ANY_EXTEND, dl, TLI.getTypeToTransformTo(OldVT),
- GetScalarizedVector(OldVec));
- }
-
- // Convert to a vector half as long with an element type of twice the width,
- // for example <4 x i16> -> <2 x i32>.
- assert(!(OldElts & 1) && "Odd length vectors not supported!");
- MVT NewVT = MVT::getIntegerVT(2 * OldVT.getSizeInBits());
- assert(OldVT.isSimple() && NewVT.isSimple());
-
- SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl,
- MVT::getVectorVT(NewVT, OldElts / 2),
- OldVec);
-
- // Extract the element at OldIdx / 2 from the new vector.
- SDValue OldIdx = N->getOperand(1);
- SDValue NewIdx = DAG.getNode(ISD::SRL, dl, OldIdx.getValueType(), OldIdx,
- DAG.getConstant(1, TLI.getPointerTy()));
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, NewVec, NewIdx);
-
- // Select the appropriate half of the element: Lo if OldIdx was even,
- // Hi if it was odd.
- SDValue Lo = Elt;
- SDValue Hi = DAG.getNode(ISD::SRL, dl, NewVT, Elt,
- DAG.getConstant(OldVT.getSizeInBits(),
- TLI.getPointerTy()));
- if (TLI.isBigEndian())
- std::swap(Lo, Hi);
-
- // Extend to the promoted type.
- SDValue Odd = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, OldIdx);
- SDValue Res = DAG.getNode(ISD::SELECT, dl, NewVT, Odd, Hi, Lo);
- return DAG.getNode(ISD::ANY_EXTEND, dl, TLI.getTypeToTransformTo(OldVT), Res);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NVT, N->getOperand(0),
+ N->getOperand(1));
}
SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned NewOpc = N->getOpcode();
DebugLoc dl = N->getDebugLoc();
// If we're promoting a UINT to a larger size and the larger FP_TO_UINT is
// not Legal, check to see if we can use FP_TO_SINT instead. (If both UINT
// and SINT conversions are Custom, there is no way to tell which is preferable.
- // We choose SINT because that's the right thing on PPC.)
+ // We choose SINT because that's the right thing on PPC.)
if (N->getOpcode() == ISD::FP_TO_UINT &&
!TLI.isOperationLegal(ISD::FP_TO_UINT, NVT) &&
TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NVT))
@@ -376,7 +325,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
DebugLoc dl = N->getDebugLoc();
if (getTypeAction(N->getOperand(0).getValueType()) == PromoteInteger) {
@@ -403,7 +352,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) {
assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
ISD::LoadExtType ExtType =
ISD::isNON_EXTLoad(N) ? ISD::EXTLOAD : N->getExtensionType();
DebugLoc dl = N->getDebugLoc();
@@ -421,8 +370,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) {
/// Promote the overflow flag of an overflowing arithmetic node.
SDValue DAGTypeLegalizer::PromoteIntRes_Overflow(SDNode *N) {
// Simply change the return type of the boolean result.
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(1));
- MVT ValueVTs[] = { N->getValueType(0), NVT };
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(1));
+ EVT ValueVTs[] = { N->getValueType(0), NVT };
SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
SDValue Res = DAG.getNode(N->getOpcode(), N->getDebugLoc(),
DAG.getVTList(ValueVTs, 2), Ops, 2);
@@ -442,8 +391,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SADDSUBO(SDNode *N, unsigned ResNo) {
// sign extension of its truncation to the original type.
SDValue LHS = SExtPromotedInteger(N->getOperand(0));
SDValue RHS = SExtPromotedInteger(N->getOperand(1));
- MVT OVT = N->getOperand(0).getValueType();
- MVT NVT = LHS.getValueType();
+ EVT OVT = N->getOperand(0).getValueType();
+ EVT NVT = LHS.getValueType();
DebugLoc dl = N->getDebugLoc();
// Do the arithmetic in the larger type.
@@ -487,7 +436,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SELECT_CC(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) {
- MVT SVT = TLI.getSetCCResultType(N->getOperand(0).getValueType());
+ EVT SVT = TLI.getSetCCResultType(N->getOperand(0).getValueType());
assert(isTypeLegal(SVT) && "Illegal SetCC type!");
DebugLoc dl = N->getDebugLoc();
@@ -496,14 +445,14 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) {
N->getOperand(1), N->getOperand(2));
// Convert to the expected type.
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
assert(NVT.bitsLE(SVT) && "Integer type overpromoted?");
return DAG.getNode(ISD::TRUNCATE, dl, NVT, SetCC);
}
SDValue DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N) {
return DAG.getNode(ISD::SHL, N->getDebugLoc(),
- TLI.getTypeToTransformTo(N->getValueType(0)),
+ TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
GetPromotedInteger(N->getOperand(0)), N->getOperand(1));
}
@@ -532,18 +481,18 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) {
// The input value must be properly zero extended.
- MVT VT = N->getValueType(0);
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT VT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Res = ZExtPromotedInteger(N->getOperand(0));
return DAG.getNode(ISD::SRL, N->getDebugLoc(), NVT, Res, N->getOperand(1));
}
SDValue DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Res;
switch (getTypeAction(N->getOperand(0).getValueType())) {
- default: assert(0 && "Unknown type action!");
+ default: llvm_unreachable("Unknown type action!");
case Legal:
case ExpandInteger:
Res = N->getOperand(0);
@@ -565,8 +514,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo) {
// zero extension of its truncation to the original type.
SDValue LHS = ZExtPromotedInteger(N->getOperand(0));
SDValue RHS = ZExtPromotedInteger(N->getOperand(1));
- MVT OVT = N->getOperand(0).getValueType();
- MVT NVT = LHS.getValueType();
+ EVT OVT = N->getOperand(0).getValueType();
+ EVT NVT = LHS.getValueType();
DebugLoc dl = N->getDebugLoc();
// Do the arithmetic in the larger type.
@@ -594,17 +543,17 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UDIV(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntRes_UNDEF(SDNode *N) {
- return DAG.getUNDEF(TLI.getTypeToTransformTo(N->getValueType(0)));
+ return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)));
}
SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
SDValue Chain = N->getOperand(0); // Get the chain.
SDValue Ptr = N->getOperand(1); // Get the pointer.
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
- MVT RegVT = TLI.getRegisterType(VT);
- unsigned NumRegs = TLI.getNumRegisters(VT);
+ EVT RegVT = TLI.getRegisterType(*DAG.getContext(), VT);
+ unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), VT);
// The argument is passed as NumRegs registers of type RegVT.
SmallVector<SDValue, 8> Parts(NumRegs);
@@ -618,7 +567,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
std::reverse(Parts.begin(), Parts.end());
// Assemble the parts in the promoted type.
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Res = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Parts[0]);
for (unsigned i = 1; i < NumRegs; ++i) {
SDValue Part = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Parts[i]);
@@ -650,7 +599,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) {
/// result types of the node are known to be legal, but other operands of the
/// node may need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
- DEBUG(cerr << "Promote integer operand: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Promote integer operand: "; N->dump(&DAG); errs() << "\n");
SDValue Res = SDValue();
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
@@ -659,11 +608,10 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "PromoteIntegerOperand Op #" << OpNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "PromoteIntegerOperand Op #" << OpNo << ": ";
+ N->dump(&DAG); errs() << "\n";
#endif
- assert(0 && "Do not know how to promote this operator's operand!");
- abort();
+ llvm_unreachable("Do not know how to promote this operator's operand!");
case ISD::ANY_EXTEND: Res = PromoteIntOp_ANY_EXTEND(N); break;
case ISD::BIT_CONVERT: Res = PromoteIntOp_BIT_CONVERT(N); break;
@@ -719,7 +667,7 @@ void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &NewLHS,SDValue &NewRHS,
// insert sign extends for ALL conditions, but zero extend is cheaper on
// many machines (an AND instead of two shifts), so prefer it.
switch (CCCode) {
- default: assert(0 && "Unknown integer comparison!");
+ default: llvm_unreachable("Unknown integer comparison!");
case ISD::SETEQ:
case ISD::SETNE:
case ISD::SETUGE:
@@ -770,7 +718,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
assert(OpNo == 1 && "only know how to promote condition");
// Promote all the way up to the canonical SetCC type.
- MVT SVT = TLI.getSetCCResultType(MVT::Other);
+ EVT SVT = TLI.getSetCCResultType(MVT::Other);
SDValue Cond = PromoteTargetBoolean(N->getOperand(1), SVT);
// The chain (Op#0) and basic block destination (Op#2) are always legal types.
@@ -780,7 +728,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) {
// Since the result type is legal, the operands must promote to it.
- MVT OVT = N->getOperand(0).getValueType();
+ EVT OVT = N->getOperand(0).getValueType();
SDValue Lo = ZExtPromotedInteger(N->getOperand(0));
SDValue Hi = GetPromotedInteger(N->getOperand(1));
assert(Lo.getValueType() == N->getValueType(0) && "Operand over promoted?");
@@ -795,7 +743,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
// The vector type is legal but the element type is not. This implies
// that the vector is a power-of-two in length and that the element
// type does not have a strange size (eg: it is not i1).
- MVT VecVT = N->getValueType(0);
+ EVT VecVT = N->getValueType(0);
unsigned NumElts = VecVT.getVectorNumElements();
assert(!(NumElts & 1) && "Legal vector of one illegal element?");
@@ -871,7 +819,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
assert(OpNo == 0 && "Only know how to promote condition");
// Promote all the way up to the canonical SetCC type.
- MVT SVT = TLI.getSetCCResultType(N->getOperand(1).getValueType());
+ EVT SVT = TLI.getSetCCResultType(N->getOperand(1).getValueType());
SDValue Cond = PromoteTargetBoolean(N->getOperand(0), SVT);
return DAG.UpdateNodeOperands(SDValue(N, 0), Cond,
@@ -962,7 +910,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) {
/// have invalid operands or may have other results that need promotion, we just
/// know that (at least) one result needs expansion.
void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
- DEBUG(cerr << "Expand integer result: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Expand integer result: "; N->dump(&DAG); errs() << "\n");
SDValue Lo, Hi;
Lo = Hi = SDValue();
@@ -973,11 +921,10 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "ExpandIntegerResult #" << ResNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "ExpandIntegerResult #" << ResNo << ": ";
+ N->dump(&DAG); errs() << "\n";
#endif
- assert(0 && "Do not know how to expand the result of this operator!");
- abort();
+ llvm_unreachable("Do not know how to expand the result of this operator!");
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, Lo, Hi); break;
case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
@@ -1043,10 +990,10 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
SDValue InL, InH;
GetExpandedInteger(N->getOperand(0), InL, InH);
- MVT NVT = InL.getValueType();
+ EVT NVT = InL.getValueType();
unsigned VTBits = N->getValueType(0).getSizeInBits();
unsigned NVTBits = NVT.getSizeInBits();
- MVT ShTy = N->getOperand(1).getValueType();
+ EVT ShTy = N->getOperand(1).getValueType();
if (N->getOpcode() == ISD::SHL) {
if (Amt > VTBits) {
@@ -1060,7 +1007,7 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
Hi = InL;
} else if (Amt == 1 &&
TLI.isOperationLegalOrCustom(ISD::ADDC,
- TLI.getTypeToExpandTo(NVT))) {
+ TLI.getTypeToExpandTo(*DAG.getContext(), NVT))) {
// Emit this X << 1 as X+X.
SDVTList VTList = DAG.getVTList(NVT, MVT::Flag);
SDValue LoOps[2] = { InL, InL };
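ExpandShiftByConstant splits a shift of the illegal double-width type into operations on the two legal halves; the case shown here emits X << 1 as X + X when ADDC/ADDE are available, so the carry flag moves the top bit of the low half into the high half for free. The general constant case (0 < Amt < NVTBits) amounts to, for a 64-bit value held as two 32-bit halves:

    #include <cstdint>

    // (Hi:Lo) << Amt expanded on 32-bit halves, for 0 < Amt < 32.
    static void Shl64ByConst(uint32_t Lo, uint32_t Hi, unsigned Amt,
                             uint32_t &LoOut, uint32_t &HiOut) {
      LoOut = Lo << Amt;
      HiOut = (Hi << Amt) | (Lo >> (32 - Amt));
    }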
@@ -1130,8 +1077,8 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
bool DAGTypeLegalizer::
ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue Amt = N->getOperand(1);
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
- MVT ShTy = Amt.getValueType();
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ EVT ShTy = Amt.getValueType();
unsigned ShBits = ShTy.getSizeInBits();
unsigned NVTBits = NVT.getSizeInBits();
assert(isPowerOf2_32(NVTBits) &&
@@ -1158,7 +1105,7 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
DAG.getConstant(~HighBitMask, ShTy));
switch (N->getOpcode()) {
- default: assert(0 && "Unknown shift");
+ default: llvm_unreachable("Unknown shift");
case ISD::SHL:
Lo = DAG.getConstant(0, NVT); // Low part is zero.
Hi = DAG.getNode(ISD::SHL, dl, NVT, InL, Amt); // High part from Lo part.
@@ -1186,7 +1133,7 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
Amt);
unsigned Op1, Op2;
switch (N->getOpcode()) {
- default: assert(0 && "Unknown shift");
+ default: llvm_unreachable("Unknown shift");
case ISD::SHL: Op1 = ISD::SHL; Op2 = ISD::SRL; break;
case ISD::SRL:
case ISD::SRA: Op1 = ISD::SRL; Op2 = ISD::SHL; break;
@@ -1208,8 +1155,8 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
bool DAGTypeLegalizer::
ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue Amt = N->getOperand(1);
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
- MVT ShTy = Amt.getValueType();
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ EVT ShTy = Amt.getValueType();
unsigned NVTBits = NVT.getSizeInBits();
assert(isPowerOf2_32(NVTBits) &&
"Expanded integer type size not a power of two!");
@@ -1226,7 +1173,7 @@ ExpandShiftWithUnknownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue Lo1, Hi1, Lo2, Hi2;
switch (N->getOpcode()) {
- default: assert(0 && "Unknown shift");
+ default: llvm_unreachable("Unknown shift");
case ISD::SHL:
// ShAmt < NVTBits
Lo1 = DAG.getConstant(0, NVT); // Low part is zero.
@@ -1283,7 +1230,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
- MVT NVT = LHSL.getValueType();
+ EVT NVT = LHSL.getValueType();
SDValue LoOps[2] = { LHSL, RHSL };
SDValue HiOps[3] = { LHSH, RHSH };
@@ -1295,7 +1242,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
bool hasCarry =
TLI.isOperationLegalOrCustom(N->getOpcode() == ISD::ADD ?
ISD::ADDC : ISD::SUBC,
- TLI.getTypeToExpandTo(NVT));
+ TLI.getTypeToExpandTo(*DAG.getContext(), NVT));
if (hasCarry) {
SDVTList VTList = DAG.getVTList(NVT, MVT::Flag);
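ExpandIntRes_ADDSUB checks whether the target has ADDC/SUBC for the expanded type; if so, the low halves are combined with the carry-producing node and the high halves with the carry-consuming ADDE/SUBE. Without a hardware carry the same computation can be written with an explicit compare, roughly:

    #include <cstdint>

    // i64 add on i32 halves: Lo = ADDC(LHSL, RHSL), Hi = ADDE(LHSH, RHSH).
    static void Add64Expanded(uint32_t AL, uint32_t AH, uint32_t BL, uint32_t BH,
                              uint32_t &Lo, uint32_t &Hi) {
      Lo = AL + BL;
      uint32_t Carry = (Lo < AL);  // the flag ADDC would produce
      Hi = AH + BH + Carry;        // ADDE folds it into the high half
    }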
@@ -1384,7 +1331,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBE(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_ANY_EXTEND(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
DebugLoc dl = N->getDebugLoc();
SDValue Op = N->getOperand(0);
if (Op.getValueType().bitsLE(NVT)) {
@@ -1408,14 +1355,14 @@ void DAGTypeLegalizer::ExpandIntRes_AssertSext(SDNode *N,
SDValue &Lo, SDValue &Hi) {
DebugLoc dl = N->getDebugLoc();
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- MVT NVT = Lo.getValueType();
- MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+ EVT NVT = Lo.getValueType();
+ EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
unsigned NVTBits = NVT.getSizeInBits();
unsigned EVTBits = EVT.getSizeInBits();
if (NVTBits < EVTBits) {
Hi = DAG.getNode(ISD::AssertSext, dl, NVT, Hi,
- DAG.getValueType(MVT::getIntegerVT(EVTBits - NVTBits)));
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), EVTBits - NVTBits)));
} else {
Lo = DAG.getNode(ISD::AssertSext, dl, NVT, Lo, DAG.getValueType(EVT));
// The high part replicates the sign bit of Lo, make it explicit.
@@ -1428,14 +1375,14 @@ void DAGTypeLegalizer::ExpandIntRes_AssertZext(SDNode *N,
SDValue &Lo, SDValue &Hi) {
DebugLoc dl = N->getDebugLoc();
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- MVT NVT = Lo.getValueType();
- MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+ EVT NVT = Lo.getValueType();
+ EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
unsigned NVTBits = NVT.getSizeInBits();
unsigned EVTBits = EVT.getSizeInBits();
if (NVTBits < EVTBits) {
Hi = DAG.getNode(ISD::AssertZext, dl, NVT, Hi,
- DAG.getValueType(MVT::getIntegerVT(EVTBits - NVTBits)));
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), EVTBits - NVTBits)));
} else {
Lo = DAG.getNode(ISD::AssertZext, dl, NVT, Lo, DAG.getValueType(EVT));
// The high part must be zero, make it explicit.
@@ -1453,7 +1400,7 @@ void DAGTypeLegalizer::ExpandIntRes_BSWAP(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned NBitWidth = NVT.getSizeInBits();
const APInt &Cst = cast<ConstantSDNode>(N)->getAPIntValue();
Lo = DAG.getConstant(APInt(Cst).trunc(NBitWidth), NVT);
@@ -1465,7 +1412,7 @@ void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N,
DebugLoc dl = N->getDebugLoc();
// ctlz (HiLo) -> Hi != 0 ? ctlz(Hi) : (ctlz(Lo)+32)
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- MVT NVT = Lo.getValueType();
+ EVT NVT = Lo.getValueType();
SDValue HiNotZero = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), Hi,
DAG.getConstant(0, NVT), ISD::SETNE);
@@ -1484,7 +1431,7 @@ void DAGTypeLegalizer::ExpandIntRes_CTPOP(SDNode *N,
DebugLoc dl = N->getDebugLoc();
// ctpop(HiLo) -> ctpop(Hi)+ctpop(Lo)
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- MVT NVT = Lo.getValueType();
+ EVT NVT = Lo.getValueType();
Lo = DAG.getNode(ISD::ADD, dl, NVT, DAG.getNode(ISD::CTPOP, dl, NVT, Lo),
DAG.getNode(ISD::CTPOP, dl, NVT, Hi));
Hi = DAG.getConstant(0, NVT);
@@ -1495,7 +1442,7 @@ void DAGTypeLegalizer::ExpandIntRes_CTTZ(SDNode *N,
DebugLoc dl = N->getDebugLoc();
// cttz (HiLo) -> Lo != 0 ? cttz(Lo) : (cttz(Hi)+32)
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- MVT NVT = Lo.getValueType();
+ EVT NVT = Lo.getValueType();
SDValue LoNotZero = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT), Lo,
DAG.getConstant(0, NVT), ISD::SETNE);
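The identities used by these expansions (ctlz, ctpop, and the cttz case above) are easy to verify on a concrete value. A standalone check, assuming a host where unsigned int is 32 bits and using GCC/Clang builtins purely as the reference counts:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t V = 0x0000001080000000ULL;           // both halves non-zero
      uint32_t Lo = uint32_t(V), Hi = uint32_t(V >> 32);

      // ctlz(HiLo)  -> Hi != 0 ? ctlz(Hi) : ctlz(Lo) + 32
      unsigned Ctlz = Hi != 0 ? __builtin_clz(Hi) : __builtin_clz(Lo) + 32;
      // cttz(HiLo)  -> Lo != 0 ? cttz(Lo) : cttz(Hi) + 32
      unsigned Cttz = Lo != 0 ? __builtin_ctz(Lo) : __builtin_ctz(Hi) + 32;
      // ctpop(HiLo) -> ctpop(Hi) + ctpop(Lo)
      unsigned Pop = __builtin_popcount(Hi) + __builtin_popcount(Lo);

      assert(Ctlz == unsigned(__builtin_clzll(V)));
      assert(Cttz == unsigned(__builtin_ctzll(V)));
      assert(Pop  == unsigned(__builtin_popcountll(V)));
      return 0;
    }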
@@ -1512,7 +1459,7 @@ void DAGTypeLegalizer::ExpandIntRes_CTTZ(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_FP_TO_SINT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
DebugLoc dl = N->getDebugLoc();
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
SDValue Op = N->getOperand(0);
RTLIB::Libcall LC = RTLIB::getFPTOSINT(Op.getValueType(), VT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-sint conversion!");
@@ -1522,7 +1469,7 @@ void DAGTypeLegalizer::ExpandIntRes_FP_TO_SINT(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::ExpandIntRes_FP_TO_UINT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
DebugLoc dl = N->getDebugLoc();
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
SDValue Op = N->getOperand(0);
RTLIB::Libcall LC = RTLIB::getFPTOUINT(Op.getValueType(), VT);
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-uint conversion!");
@@ -1538,8 +1485,8 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
- MVT VT = N->getValueType(0);
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT VT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
ISD::LoadExtType ExtType = N->getExtensionType();
@@ -1551,10 +1498,10 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
assert(NVT.isByteSized() && "Expanded type not byte sized!");
if (N->getMemoryVT().bitsLE(NVT)) {
- MVT EVT = N->getMemoryVT();
+ EVT MemVT = N->getMemoryVT();
Lo = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
- EVT, isVolatile, Alignment);
+ MemVT, isVolatile, Alignment);
// Remember the chain.
Ch = Lo.getValue(1);
@@ -1580,7 +1527,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
unsigned ExcessBits =
N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits();
- MVT NEVT = MVT::getIntegerVT(ExcessBits);
+ EVT NEVT = EVT::getIntegerVT(*DAG.getContext(), ExcessBits);
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits()/8;
@@ -1597,14 +1544,15 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
} else {
// Big-endian - high bits are at low addresses. Favor aligned loads at
// the cost of some bit-fiddling.
- MVT EVT = N->getMemoryVT();
- unsigned EBytes = EVT.getStoreSizeInBits()/8;
+ EVT MemVT = N->getMemoryVT();
+ unsigned EBytes = MemVT.getStoreSize();
unsigned IncrementSize = NVT.getSizeInBits()/8;
unsigned ExcessBits = (EBytes - IncrementSize)*8;
// Load both the high bits and maybe some of the low bits.
Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
- MVT::getIntegerVT(EVT.getSizeInBits() - ExcessBits),
+ EVT::getIntegerVT(*DAG.getContext(),
+ MemVT.getSizeInBits() - ExcessBits),
isVolatile, Alignment);
// Increment the pointer to the other half.
@@ -1613,7 +1561,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
// Load the rest of the low bits.
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, NVT, Ch, Ptr, N->getSrcValue(),
SVOffset+IncrementSize,
- MVT::getIntegerVT(ExcessBits),
+ EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
isVolatile, MinAlign(Alignment, IncrementSize));
// Build a factor node to remember that this load is independent of the
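The byte accounting in the big-endian branch (EBytes, IncrementSize, ExcessBits) is easier to follow on a worked case. A sketch assuming NVT = i32 and a 48-bit memory type, so EBytes = 6, IncrementSize = 4 and ExcessBits = 16; it only models which bytes each of the two loads covers, not the later bit-fiddling in the DAG:

    #include <cassert>
    #include <cstdint>

    int main() {
      // 0x112233445566 stored big-endian in 6 bytes: high bits at low addresses.
      const uint8_t Mem[6] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66};

      // First load: 48 - 16 = 32 bits from offset 0 (the aligned, favored load).
      uint32_t HiPart = (uint32_t(Mem[0]) << 24) | (uint32_t(Mem[1]) << 16) |
                        (uint32_t(Mem[2]) << 8)  |  uint32_t(Mem[3]);
      // Second load: the remaining ExcessBits = 16 bits, zero-extended.
      uint32_t LoPart = (uint32_t(Mem[4]) << 8)  |  uint32_t(Mem[5]);

      uint64_t Value = (uint64_t(HiPart) << 16) | LoPart;
      assert(Value == 0x112233445566ULL);
      return 0;
    }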
@@ -1652,8 +1600,8 @@ void DAGTypeLegalizer::ExpandIntRes_Logical(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT VT = N->getValueType(0);
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT VT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
DebugLoc dl = N->getDebugLoc();
bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, NVT);
@@ -1742,7 +1690,7 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
@@ -1762,7 +1710,7 @@ void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
// If we can emit an efficient shift operation, do so now. Check to see if
@@ -1788,7 +1736,7 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
// Next check to see if the target supports this SHL_PARTS operation or if it
// will custom expand it.
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
TargetLowering::LegalizeAction Action = TLI.getOperationAction(PartsOpc, NVT);
if ((Action == TargetLowering::Legal && TLI.isTypeLegal(NVT)) ||
Action == TargetLowering::Custom) {
@@ -1797,7 +1745,7 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
SDValue Ops[] = { LHSL, LHSH, N->getOperand(1) };
- MVT VT = LHSL.getValueType();
+ EVT VT = LHSL.getValueType();
Lo = DAG.getNode(PartsOpc, dl, DAG.getVTList(VT, VT), Ops, 3);
Hi = Lo.getValue(1);
return;
@@ -1838,7 +1786,7 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
else if (VT == MVT::i128)
LC = RTLIB::SRA_I128;
}
-
+
if (LC != RTLIB::UNKNOWN_LIBCALL && TLI.getLibcallName(LC)) {
SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
SplitInteger(MakeLibCall(LC, VT, Ops, 2, isSigned, dl), Lo, Hi);
@@ -1846,12 +1794,12 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
}
if (!ExpandShiftWithUnknownAmountBit(N, Lo, Hi))
- assert(0 && "Unsupported shift!");
+ llvm_unreachable("Unsupported shift!");
}
void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
DebugLoc dl = N->getDebugLoc();
SDValue Op = N->getOperand(0);
if (Op.getValueType().bitsLE(NVT)) {
@@ -1874,7 +1822,7 @@ void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N,
unsigned ExcessBits =
Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,
- DAG.getValueType(MVT::getIntegerVT(ExcessBits)));
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), ExcessBits)));
}
}
@@ -1882,7 +1830,7 @@ void DAGTypeLegalizer::
ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi) {
DebugLoc dl = N->getDebugLoc();
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+ EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
if (EVT.bitsLE(Lo.getValueType())) {
// sext_inreg the low part if needed.
@@ -1900,13 +1848,13 @@ ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi) {
unsigned ExcessBits =
EVT.getSizeInBits() - Lo.getValueType().getSizeInBits();
Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,
- DAG.getValueType(MVT::getIntegerVT(ExcessBits)));
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), ExcessBits)));
}
}
void DAGTypeLegalizer::ExpandIntRes_SREM(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
@@ -1926,7 +1874,7 @@ void DAGTypeLegalizer::ExpandIntRes_SREM(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
DebugLoc dl = N->getDebugLoc();
Lo = DAG.getNode(ISD::TRUNCATE, dl, NVT, N->getOperand(0));
Hi = DAG.getNode(ISD::SRL, dl,
@@ -1937,7 +1885,7 @@ void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
@@ -1957,7 +1905,7 @@ void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_UREM(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
@@ -1977,7 +1925,7 @@ void DAGTypeLegalizer::ExpandIntRes_UREM(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,
SDValue &Lo, SDValue &Hi) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
DebugLoc dl = N->getDebugLoc();
SDValue Op = N->getOperand(0);
if (Op.getValueType().bitsLE(NVT)) {
@@ -1996,7 +1944,7 @@ void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,
SplitInteger(Res, Lo, Hi);
unsigned ExcessBits =
Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
- Hi = DAG.getZeroExtendInReg(Hi, dl, MVT::getIntegerVT(ExcessBits));
+ Hi = DAG.getZeroExtendInReg(Hi, dl, EVT::getIntegerVT(*DAG.getContext(), ExcessBits));
}
}
@@ -2010,7 +1958,7 @@ void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,
/// result types of the node are known to be legal, but other operands of the
/// node may need promotion or expansion as well as the specified one.
bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
- DEBUG(cerr << "Expand integer operand: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Expand integer operand: "; N->dump(&DAG); errs() << "\n");
SDValue Res = SDValue();
if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false))
@@ -2019,11 +1967,10 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "ExpandIntegerOperand Op #" << OpNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "ExpandIntegerOperand Op #" << OpNo << ": ";
+ N->dump(&DAG); errs() << "\n";
#endif
- assert(0 && "Do not know how to expand this operator's operand!");
- abort();
+ llvm_unreachable("Do not know how to expand this operator's operand!");
case ISD::BIT_CONVERT: Res = ExpandOp_BIT_CONVERT(N); break;
case ISD::BR_CC: Res = ExpandIntOp_BR_CC(N); break;
@@ -2070,7 +2017,7 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS,
GetExpandedInteger(NewLHS, LHSLo, LHSHi);
GetExpandedInteger(NewRHS, RHSLo, RHSHi);
- MVT VT = NewLHS.getValueType();
+ EVT VT = NewLHS.getValueType();
if (CCCode == ISD::SETEQ || CCCode == ISD::SETNE) {
if (RHSLo == RHSHi) {
@@ -2105,7 +2052,7 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS,
// FIXME: This generated code sucks.
ISD::CondCode LowCC;
switch (CCCode) {
- default: assert(0 && "Unknown integer setcc!");
+ default: llvm_unreachable("Unknown integer setcc!");
case ISD::SETLT:
case ISD::SETULT: LowCC = ISD::SETULT; break;
case ISD::SETGT:
@@ -2122,7 +2069,7 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS,
// NOTE: on targets without efficient SELECT of bools, we can always use
// this identity: (B1 ? B2 : B3) --> (B1 & B2)|(!B1&B3)
- TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, false, true, NULL);
+ TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, false, true, true, NULL);
SDValue Tmp1, Tmp2;
Tmp1 = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSLo.getValueType()),
LHSLo, RHSLo, LowCC, false, DagCombineInfo, dl);
@@ -2228,7 +2175,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_Shift(SDNode *N) {
SDValue DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) {
SDValue Op = N->getOperand(0);
- MVT DstVT = N->getValueType(0);
+ EVT DstVT = N->getValueType(0);
RTLIB::Libcall LC = RTLIB::getSINTTOFP(Op.getValueType(), DstVT);
assert(LC != RTLIB::UNKNOWN_LIBCALL &&
"Don't know how to expand this SINT_TO_FP!");
@@ -2242,8 +2189,8 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
assert(OpNo == 1 && "Can only expand the stored value so far");
- MVT VT = N->getOperand(1).getValueType();
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT VT = N->getOperand(1).getValueType();
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
int SVOffset = N->getSrcValueOffset();
@@ -2267,7 +2214,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
unsigned ExcessBits =
N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits();
- MVT NEVT = MVT::getIntegerVT(ExcessBits);
+ EVT NEVT = EVT::getIntegerVT(*DAG.getContext(), ExcessBits);
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits()/8;
@@ -2282,11 +2229,11 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
// the cost of some bit-fiddling.
GetExpandedInteger(N->getValue(), Lo, Hi);
- MVT EVT = N->getMemoryVT();
- unsigned EBytes = EVT.getStoreSizeInBits()/8;
+ EVT ExtVT = N->getMemoryVT();
+ unsigned EBytes = ExtVT.getStoreSize();
unsigned IncrementSize = NVT.getSizeInBits()/8;
unsigned ExcessBits = (EBytes - IncrementSize)*8;
- MVT HiVT = MVT::getIntegerVT(EVT.getSizeInBits() - ExcessBits);
+ EVT HiVT = EVT::getIntegerVT(*DAG.getContext(), ExtVT.getSizeInBits() - ExcessBits);
if (ExcessBits < NVT.getSizeInBits()) {
// Transfer high bits from the top of Lo to the bottom of Hi.
@@ -2309,7 +2256,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
// Store the lowest ExcessBits bits in the second half.
Lo = DAG.getTruncStore(Ch, dl, Lo, Ptr, N->getSrcValue(),
SVOffset+IncrementSize,
- MVT::getIntegerVT(ExcessBits),
+ EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
isVolatile, MinAlign(Alignment, IncrementSize));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
@@ -2324,8 +2271,8 @@ SDValue DAGTypeLegalizer::ExpandIntOp_TRUNCATE(SDNode *N) {
SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
SDValue Op = N->getOperand(0);
- MVT SrcVT = Op.getValueType();
- MVT DstVT = N->getValueType(0);
+ EVT SrcVT = Op.getValueType();
+ EVT DstVT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
if (TLI.getOperationAction(ISD::SINT_TO_FP, SrcVT) == TargetLowering::Custom){
@@ -2360,7 +2307,8 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
ISD::SETLT);
// Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
- SDValue FudgePtr = DAG.getConstantPool(ConstantInt::get(FF.zext(64)),
+ SDValue FudgePtr = DAG.getConstantPool(
+ ConstantInt::get(*DAG.getContext(), FF.zext(64)),
TLI.getPointerTy());
// Get a pointer to FF if the sign bit was set, or to 0 otherwise.
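The trick being implemented: convert as if signed, then correct with a "fudge factor" fetched from the constant pool when the sign bit was set. A minimal sketch of the same idea for u64 -> double; the DAG version selects the correction through a constant-pool load rather than a branch, and 2^64 here stands in for the FF value built above:

    #include <cassert>
    #include <cstdint>

    static double u64_to_double(uint64_t X) {
      double D = double(int64_t(X));      // signed conversion
      if (int64_t(X) < 0)                 // sign bit was set
        D += 18446744073709551616.0;      // add 2^64 as the correction
      return D;
    }

    int main() {
      assert(u64_to_double(0x8000000000000000ULL) == 9223372036854775808.0);
      assert(u64_to_double(123u) == 123.0);
      return 0;
    }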
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 3135a44..5992f5d 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -15,9 +15,11 @@
#include "LegalizeTypes.h"
#include "llvm/CallingConv.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
static cl::opt<bool>
@@ -113,43 +115,43 @@ void DAGTypeLegalizer::PerformExpensiveChecks() {
if (I->getNodeId() != Processed) {
if (Mapped != 0) {
- cerr << "Unprocessed value in a map!";
+ errs() << "Unprocessed value in a map!";
Failed = true;
}
} else if (isTypeLegal(Res.getValueType()) || IgnoreNodeResults(I)) {
if (Mapped > 1) {
- cerr << "Value with legal type was transformed!";
+ errs() << "Value with legal type was transformed!";
Failed = true;
}
} else {
if (Mapped == 0) {
- cerr << "Processed value not in any map!";
+ errs() << "Processed value not in any map!";
Failed = true;
} else if (Mapped & (Mapped - 1)) {
- cerr << "Value in multiple maps!";
+ errs() << "Value in multiple maps!";
Failed = true;
}
}
if (Failed) {
if (Mapped & 1)
- cerr << " ReplacedValues";
+ errs() << " ReplacedValues";
if (Mapped & 2)
- cerr << " PromotedIntegers";
+ errs() << " PromotedIntegers";
if (Mapped & 4)
- cerr << " SoftenedFloats";
+ errs() << " SoftenedFloats";
if (Mapped & 8)
- cerr << " ScalarizedVectors";
+ errs() << " ScalarizedVectors";
if (Mapped & 16)
- cerr << " ExpandedIntegers";
+ errs() << " ExpandedIntegers";
if (Mapped & 32)
- cerr << " ExpandedFloats";
+ errs() << " ExpandedFloats";
if (Mapped & 64)
- cerr << " SplitVectors";
+ errs() << " SplitVectors";
if (Mapped & 128)
- cerr << " WidenedVectors";
- cerr << "\n";
- abort();
+ errs() << " WidenedVectors";
+ errs() << "\n";
+ llvm_unreachable(0);
}
}
}
@@ -210,7 +212,7 @@ bool DAGTypeLegalizer::run() {
// Scan the values produced by the node, checking to see if any result
// types are illegal.
for (unsigned i = 0, NumResults = N->getNumValues(); i < NumResults; ++i) {
- MVT ResultVT = N->getValueType(i);
+ EVT ResultVT = N->getValueType(i);
switch (getTypeAction(ResultVT)) {
default:
assert(false && "Unknown action!");
@@ -263,7 +265,7 @@ ScanOperands:
if (IgnoreNodeResults(N->getOperand(i).getNode()))
continue;
- MVT OpVT = N->getOperand(i).getValueType();
+ EVT OpVT = N->getOperand(i).getValueType();
switch (getTypeAction(OpVT)) {
default:
assert(false && "Unknown action!");
@@ -336,7 +338,7 @@ ScanOperands:
}
if (i == NumOperands) {
- DEBUG(cerr << "Legally typed node: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Legally typed node: "; N->dump(&DAG); errs() << "\n");
}
}
NodeDone:
@@ -405,7 +407,7 @@ NodeDone:
if (!IgnoreNodeResults(I))
for (unsigned i = 0, NumVals = I->getNumValues(); i < NumVals; ++i)
if (!isTypeLegal(I->getValueType(i))) {
- cerr << "Result type " << i << " illegal!\n";
+ errs() << "Result type " << i << " illegal!\n";
Failed = true;
}
@@ -413,25 +415,25 @@ NodeDone:
for (unsigned i = 0, NumOps = I->getNumOperands(); i < NumOps; ++i)
if (!IgnoreNodeResults(I->getOperand(i).getNode()) &&
!isTypeLegal(I->getOperand(i).getValueType())) {
- cerr << "Operand type " << i << " illegal!\n";
+ errs() << "Operand type " << i << " illegal!\n";
Failed = true;
}
if (I->getNodeId() != Processed) {
if (I->getNodeId() == NewNode)
- cerr << "New node not analyzed?\n";
+ errs() << "New node not analyzed?\n";
else if (I->getNodeId() == Unanalyzed)
- cerr << "Unanalyzed node not noticed?\n";
+ errs() << "Unanalyzed node not noticed?\n";
else if (I->getNodeId() > 0)
- cerr << "Operand not processed?\n";
+ errs() << "Operand not processed?\n";
else if (I->getNodeId() == ReadyToProcess)
- cerr << "Not added to worklist?\n";
+ errs() << "Not added to worklist?\n";
Failed = true;
}
if (Failed) {
- I->dump(&DAG); cerr << "\n";
- abort();
+ I->dump(&DAG); errs() << "\n";
+ llvm_unreachable(0);
}
}
#endif
@@ -479,8 +481,7 @@ SDNode *DAGTypeLegalizer::AnalyzeNewNode(SDNode *N) {
NewOps.push_back(Op);
} else if (Op != OrigOp) {
// This is the first operand to change - add all operands so far.
- for (unsigned j = 0; j < i; ++j)
- NewOps.push_back(N->getOperand(j));
+ NewOps.insert(NewOps.end(), N->op_begin(), N->op_begin() + i);
NewOps.push_back(Op);
}
}
@@ -732,6 +733,8 @@ void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
}
void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) {
+ assert(Result.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ "Invalid type for promoted integer");
AnalyzeNewValue(Result);
SDValue &OpEntry = PromotedIntegers[Op];
@@ -740,6 +743,8 @@ void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) {
}
void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) {
+ assert(Result.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ "Invalid type for softened float");
AnalyzeNewValue(Result);
SDValue &OpEntry = SoftenedFloats[Op];
@@ -748,6 +753,8 @@ void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) {
}
void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) {
+ assert(Result.getValueType() == Op.getValueType().getVectorElementType() &&
+ "Invalid type for scalarized vector");
AnalyzeNewValue(Result);
SDValue &OpEntry = ScalarizedVectors[Op];
@@ -767,6 +774,9 @@ void DAGTypeLegalizer::GetExpandedInteger(SDValue Op, SDValue &Lo,
void DAGTypeLegalizer::SetExpandedInteger(SDValue Op, SDValue Lo,
SDValue Hi) {
+ assert(Lo.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ Hi.getValueType() == Lo.getValueType() &&
+ "Invalid type for expanded integer");
// Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
AnalyzeNewValue(Lo);
AnalyzeNewValue(Hi);
@@ -790,6 +800,9 @@ void DAGTypeLegalizer::GetExpandedFloat(SDValue Op, SDValue &Lo,
void DAGTypeLegalizer::SetExpandedFloat(SDValue Op, SDValue Lo,
SDValue Hi) {
+ assert(Lo.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ Hi.getValueType() == Lo.getValueType() &&
+ "Invalid type for expanded float");
// Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
AnalyzeNewValue(Lo);
AnalyzeNewValue(Hi);
@@ -813,6 +826,12 @@ void DAGTypeLegalizer::GetSplitVector(SDValue Op, SDValue &Lo,
void DAGTypeLegalizer::SetSplitVector(SDValue Op, SDValue Lo,
SDValue Hi) {
+ assert(Lo.getValueType().getVectorElementType() ==
+ Op.getValueType().getVectorElementType() &&
+ 2*Lo.getValueType().getVectorNumElements() ==
+ Op.getValueType().getVectorNumElements() &&
+ Hi.getValueType() == Lo.getValueType() &&
+ "Invalid type for split vector");
// Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
AnalyzeNewValue(Lo);
AnalyzeNewValue(Hi);
@@ -825,6 +844,8 @@ void DAGTypeLegalizer::SetSplitVector(SDValue Op, SDValue Lo,
}
void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {
+ assert(Result.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ "Invalid type for widened vector");
AnalyzeNewValue(Result);
SDValue &OpEntry = WidenedVectors[Op];
@@ -841,7 +862,7 @@ void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {
SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) {
unsigned BitWidth = Op.getValueType().getSizeInBits();
return DAG.getNode(ISD::BIT_CONVERT, Op.getDebugLoc(),
- MVT::getIntegerVT(BitWidth), Op);
+ EVT::getIntegerVT(*DAG.getContext(), BitWidth), Op);
}
/// BitConvertVectorToIntegerVector - Convert to a vector of integers of the
@@ -849,14 +870,14 @@ SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) {
SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
assert(Op.getValueType().isVector() && "Only applies to vectors!");
unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits();
- MVT EltNVT = MVT::getIntegerVT(EltWidth);
+ EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
unsigned NumElts = Op.getValueType().getVectorNumElements();
return DAG.getNode(ISD::BIT_CONVERT, Op.getDebugLoc(),
- MVT::getVectorVT(EltNVT, NumElts), Op);
+ EVT::getVectorVT(*DAG.getContext(), EltNVT, NumElts), Op);
}
SDValue DAGTypeLegalizer::CreateStackStoreLoad(SDValue Op,
- MVT DestVT) {
+ EVT DestVT) {
DebugLoc dl = Op.getDebugLoc();
// Create the stack frame object. Make sure it is aligned for both
// the source and destination types.
@@ -875,7 +896,7 @@ SDValue DAGTypeLegalizer::CreateStackStoreLoad(SDValue Op,
/// The last parameter being TRUE means we are dealing with a
/// node with illegal result types. The second parameter denotes the type of
/// illegal ResNo in that case.
-bool DAGTypeLegalizer::CustomLowerNode(SDNode *N, MVT VT, bool LegalizeResult) {
+bool DAGTypeLegalizer::CustomLowerNode(SDNode *N, EVT VT, bool LegalizeResult) {
// See if the target wants to custom lower this node.
if (TLI.getOperationAction(N->getOpcode(), VT) != TargetLowering::Custom)
return false;
@@ -900,21 +921,14 @@ bool DAGTypeLegalizer::CustomLowerNode(SDNode *N, MVT VT, bool LegalizeResult) {
/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split into two not necessarily identical pieces.
-void DAGTypeLegalizer::GetSplitDestVTs(MVT InVT, MVT &LoVT, MVT &HiVT) {
+void DAGTypeLegalizer::GetSplitDestVTs(EVT InVT, EVT &LoVT, EVT &HiVT) {
+ // Currently all types are split in half.
if (!InVT.isVector()) {
- LoVT = HiVT = TLI.getTypeToTransformTo(InVT);
+ LoVT = HiVT = TLI.getTypeToTransformTo(*DAG.getContext(), InVT);
} else {
- MVT NewEltVT = InVT.getVectorElementType();
unsigned NumElements = InVT.getVectorNumElements();
- if ((NumElements & (NumElements-1)) == 0) { // Simple power of two vector.
- NumElements >>= 1;
- LoVT = HiVT = MVT::getVectorVT(NewEltVT, NumElements);
- } else { // Non-power-of-two vectors.
- unsigned NewNumElts_Lo = 1 << Log2_32(NumElements);
- unsigned NewNumElts_Hi = NumElements - NewNumElts_Lo;
- LoVT = MVT::getVectorVT(NewEltVT, NewNumElts_Lo);
- HiVT = MVT::getVectorVT(NewEltVT, NewNumElts_Hi);
- }
+ assert(!(NumElements & 1) && "Splitting vector, but not in half!");
+ LoVT = HiVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(), NumElements/2);
}
}
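A toy model of the new rule may help: scalars get the transformed (here, half-width) type for both pieces, while vectors must have an even element count and are simply halved; anything else is expected to have been widened earlier. The ToyVT type and helper below are illustrative only, not LLVM API:

    #include <cassert>

    // A "VT" is modelled as (element bit width, number of elements),
    // with NumElts == 0 meaning a scalar.
    struct ToyVT { unsigned EltBits; unsigned NumElts; };

    static void getSplitDestVTs(ToyVT In, ToyVT &Lo, ToyVT &Hi) {
      if (In.NumElts == 0) {                 // scalar: both halves half-size
        Lo = Hi = ToyVT{In.EltBits / 2, 0};
      } else {
        assert(!(In.NumElts & 1) && "Splitting vector, but not in half!");
        Lo = Hi = ToyVT{In.EltBits, In.NumElts / 2};
      }
    }

    int main() {
      ToyVT Lo, Hi;
      getSplitDestVTs(ToyVT{64, 0}, Lo, Hi);  // i64   -> i32   + i32
      assert(Lo.EltBits == 32 && Hi.NumElts == 0);
      getSplitDestVTs(ToyVT{32, 8}, Lo, Hi);  // v8i32 -> v4i32 + v4i32
      assert(Lo.NumElts == 4 && Hi.NumElts == 4);
      return 0;
    }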
@@ -923,14 +937,14 @@ void DAGTypeLegalizer::GetSplitDestVTs(MVT InVT, MVT &LoVT, MVT &HiVT) {
void DAGTypeLegalizer::GetPairElements(SDValue Pair,
SDValue &Lo, SDValue &Hi) {
DebugLoc dl = Pair.getDebugLoc();
- MVT NVT = TLI.getTypeToTransformTo(Pair.getValueType());
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), Pair.getValueType());
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NVT, Pair,
DAG.getIntPtrConstant(0));
Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NVT, Pair,
DAG.getIntPtrConstant(1));
}
-SDValue DAGTypeLegalizer::GetVectorElementPointer(SDValue VecPtr, MVT EltVT,
+SDValue DAGTypeLegalizer::GetVectorElementPointer(SDValue VecPtr, EVT EltVT,
SDValue Index) {
DebugLoc dl = Index.getDebugLoc();
// Make sure the index type is big enough to compute in.
@@ -952,9 +966,9 @@ SDValue DAGTypeLegalizer::JoinIntegers(SDValue Lo, SDValue Hi) {
// Arbitrarily use dlHi for result DebugLoc
DebugLoc dlHi = Hi.getDebugLoc();
DebugLoc dlLo = Lo.getDebugLoc();
- MVT LVT = Lo.getValueType();
- MVT HVT = Hi.getValueType();
- MVT NVT = MVT::getIntegerVT(LVT.getSizeInBits() + HVT.getSizeInBits());
+ EVT LVT = Lo.getValueType();
+ EVT HVT = Hi.getValueType();
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(), LVT.getSizeInBits() + HVT.getSizeInBits());
Lo = DAG.getNode(ISD::ZERO_EXTEND, dlLo, NVT, Lo);
Hi = DAG.getNode(ISD::ANY_EXTEND, dlHi, NVT, Hi);
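After the two extensions shown here, JoinIntegers still has to move Hi above Lo's bits and merge the two (that part is not visible in this hunk). The net arithmetic, on a concrete pair of i32 halves:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Lo = 0x89abcdefu, Hi = 0x01234567u;
      // zero-extend Lo, widen Hi, shift Hi past Lo's 32 bits, then merge.
      uint64_t Joined = (uint64_t(Hi) << 32) | uint64_t(Lo);
      assert(Joined == 0x0123456789abcdefULL);
      return 0;
    }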
@@ -986,7 +1000,7 @@ SDValue DAGTypeLegalizer::LibCallify(RTLIB::Libcall LC, SDNode *N,
/// MakeLibCall - Generate a libcall taking the given operands as arguments and
/// returning a result of type RetVT.
-SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT,
+SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
const SDValue *Ops, unsigned NumOps,
bool isSigned, DebugLoc dl) {
TargetLowering::ArgListTy Args;
@@ -995,7 +1009,7 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0; i != NumOps; ++i) {
Entry.Node = Ops[i];
- Entry.Ty = Entry.Node.getValueType().getTypeForMVT();
+ Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
Args.push_back(Entry);
@@ -1003,17 +1017,19 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- const Type *RetTy = RetVT.getTypeForMVT();
+ const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
- false, 0, CallingConv::C, false, Callee, Args, DAG, dl);
+ false, 0, TLI.getLibcallCallingConv(LC), false,
+ /*isReturnValueUsed=*/true,
+ Callee, Args, DAG, dl);
return CallInfo.first;
}
/// PromoteTargetBoolean - Promote the given target boolean to a target boolean
/// of the given type. A target boolean is an integer value, not necessarily of
/// type i1, the bits of which conform to getBooleanContents.
-SDValue DAGTypeLegalizer::PromoteTargetBoolean(SDValue Bool, MVT VT) {
+SDValue DAGTypeLegalizer::PromoteTargetBoolean(SDValue Bool, EVT VT) {
DebugLoc dl = Bool.getDebugLoc();
ISD::NodeType ExtendCode;
switch (TLI.getBooleanContents()) {
@@ -1039,7 +1055,7 @@ SDValue DAGTypeLegalizer::PromoteTargetBoolean(SDValue Bool, MVT VT) {
/// SplitInteger - Return the lower LoVT bits of Op in Lo and the upper HiVT
/// bits in Hi.
void DAGTypeLegalizer::SplitInteger(SDValue Op,
- MVT LoVT, MVT HiVT,
+ EVT LoVT, EVT HiVT,
SDValue &Lo, SDValue &Hi) {
DebugLoc dl = Op.getDebugLoc();
assert(LoVT.getSizeInBits() + HiVT.getSizeInBits() ==
@@ -1054,7 +1070,7 @@ void DAGTypeLegalizer::SplitInteger(SDValue Op,
/// type half the size of Op's.
void DAGTypeLegalizer::SplitInteger(SDValue Op,
SDValue &Lo, SDValue &Hi) {
- MVT HalfVT = MVT::getIntegerVT(Op.getValueType().getSizeInBits()/2);
+ EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Op.getValueType().getSizeInBits()/2);
SplitInteger(Op, HalfVT, HalfVT, Lo, Hi);
}
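SplitInteger is the inverse of JoinIntegers: the low half is a truncate and the high half is a shift right by the low type's width followed by a truncate (the same TRUNCATE/SRL pair used by ExpandIntRes_TRUNCATE above). On plain integers:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t Op = 0x0123456789abcdefULL;
      uint32_t Lo = uint32_t(Op);          // TRUNCATE to the low half
      uint32_t Hi = uint32_t(Op >> 32);    // SRL by 32, then TRUNCATE
      assert(Lo == 0x89abcdefu && Hi == 0x01234567u);
      return 0;
    }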
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 02b0732..859c656 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -64,7 +64,7 @@ private:
SoftenFloat, // Convert this float type to a same size integer type.
ExpandFloat, // Split this float type into two of half the size.
ScalarizeVector, // Replace this one-element vector with its element type.
- SplitVector, // This vector type should be split into smaller vectors.
+ SplitVector, // Split this vector type into two of half the size.
WidenVector // This vector type should be widened into a larger vector.
};
@@ -74,8 +74,8 @@ private:
TargetLowering::ValueTypeActionImpl ValueTypeActions;
/// getTypeAction - Return how we should legalize values of this type.
- LegalizeAction getTypeAction(MVT VT) const {
- switch (ValueTypeActions.getTypeAction(VT)) {
+ LegalizeAction getTypeAction(EVT VT) const {
+ switch (ValueTypeActions.getTypeAction(*DAG.getContext(), VT)) {
default:
assert(false && "Unknown legalize action!");
case TargetLowering::Legal:
@@ -96,7 +96,7 @@ private:
if (VT.isInteger())
return ExpandInteger;
else if (VT.getSizeInBits() ==
- TLI.getTypeToTransformTo(VT).getSizeInBits())
+ TLI.getTypeToTransformTo(*DAG.getContext(), VT).getSizeInBits())
return SoftenFloat;
else
return ExpandFloat;
@@ -109,8 +109,9 @@ private:
}
/// isTypeLegal - Return true if this type is legal on this target.
- bool isTypeLegal(MVT VT) const {
- return ValueTypeActions.getTypeAction(VT) == TargetLowering::Legal;
+ bool isTypeLegal(EVT VT) const {
+ return (ValueTypeActions.getTypeAction(*DAG.getContext(), VT) ==
+ TargetLowering::Legal);
}
/// IgnoreNodeResults - Pretend all of this node's results are legal.
@@ -185,19 +186,19 @@ private:
// Common routines.
SDValue BitConvertToInteger(SDValue Op);
SDValue BitConvertVectorToIntegerVector(SDValue Op);
- SDValue CreateStackStoreLoad(SDValue Op, MVT DestVT);
- bool CustomLowerNode(SDNode *N, MVT VT, bool LegalizeResult);
- SDValue GetVectorElementPointer(SDValue VecPtr, MVT EltVT, SDValue Index);
+ SDValue CreateStackStoreLoad(SDValue Op, EVT DestVT);
+ bool CustomLowerNode(SDNode *N, EVT VT, bool LegalizeResult);
+ SDValue GetVectorElementPointer(SDValue VecPtr, EVT EltVT, SDValue Index);
SDValue JoinIntegers(SDValue Lo, SDValue Hi);
SDValue LibCallify(RTLIB::Libcall LC, SDNode *N, bool isSigned);
- SDValue MakeLibCall(RTLIB::Libcall LC, MVT RetVT,
+ SDValue MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
const SDValue *Ops, unsigned NumOps, bool isSigned,
DebugLoc dl);
- SDValue PromoteTargetBoolean(SDValue Bool, MVT VT);
+ SDValue PromoteTargetBoolean(SDValue Bool, EVT VT);
void ReplaceValueWith(SDValue From, SDValue To);
void ReplaceValueWithHelper(SDValue From, SDValue To);
void SplitInteger(SDValue Op, SDValue &Lo, SDValue &Hi);
- void SplitInteger(SDValue Op, MVT LoVT, MVT HiVT,
+ void SplitInteger(SDValue Op, EVT LoVT, EVT HiVT,
SDValue &Lo, SDValue &Hi);
//===--------------------------------------------------------------------===//
@@ -224,7 +225,7 @@ private:
/// SExtPromotedInteger - Get a promoted operand and sign extend it to the
/// final size.
SDValue SExtPromotedInteger(SDValue Op) {
- MVT OldVT = Op.getValueType();
+ EVT OldVT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
Op = GetPromotedInteger(Op);
return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Op.getValueType(), Op,
@@ -234,7 +235,7 @@ private:
/// ZExtPromotedInteger - Get a promoted operand and zero extend it to the
/// final size.
SDValue ZExtPromotedInteger(SDValue Op) {
- MVT OldVT = Op.getValueType();
+ EVT OldVT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
Op = GetPromotedInteger(Op);
return DAG.getZeroExtendInReg(Op, dl, OldVT);
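What the two helpers guarantee, modelled with plain integers: a value promoted from i8 to i32 may carry garbage in its top 24 bits, and the helpers rewrite those bits to match a genuine sign or zero extension of the original. A standalone sketch; the 0xDEADBEF0 garbage pattern is made up for the example:

    #include <cassert>
    #include <cstdint>

    int main() {
      int8_t Orig = -5;                                   // illegal-typed value
      uint32_t Promoted = 0xDEADBEF0u | uint8_t(Orig);    // top bits are garbage

      // SExtPromotedInteger: SIGN_EXTEND_INREG from i8.
      int32_t SExt = int32_t(Promoted << 24) >> 24;
      assert(SExt == -5);

      // ZExtPromotedInteger: zero-extend in register from i8.
      uint32_t ZExt = Promoted & 0xFFu;
      assert(ZExt == 251u);
      return 0;
    }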
@@ -506,7 +507,6 @@ private:
// Vector Result Scalarization: <1 x ty> -> ty.
void ScalarizeVectorResult(SDNode *N, unsigned OpNo);
SDValue ScalarizeVecRes_BinOp(SDNode *N);
- SDValue ScalarizeVecRes_ShiftOp(SDNode *N);
SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
SDValue ScalarizeVecRes_BIT_CONVERT(SDNode *N);
@@ -518,6 +518,7 @@ private:
SDValue ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N);
SDValue ScalarizeVecRes_SELECT(SDNode *N);
SDValue ScalarizeVecRes_SELECT_CC(SDNode *N);
+ SDValue ScalarizeVecRes_SETCC(SDNode *N);
SDValue ScalarizeVecRes_UNDEF(SDNode *N);
SDValue ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N);
SDValue ScalarizeVecRes_VSETCC(SDNode *N);
@@ -533,8 +534,8 @@ private:
// Vector Splitting Support: LegalizeVectorTypes.cpp
//===--------------------------------------------------------------------===//
- /// GetSplitVector - Given a processed vector Op which was split into smaller
- /// vectors, this method returns the smaller vectors. The first elements of
+ /// GetSplitVector - Given a processed vector Op which was split into vectors
+ /// of half the size, this method returns the halves. The first elements of
/// Op coincide with the elements of Lo; the remaining elements of Op coincide
/// with the elements of Hi: Op is what you would get by concatenating Lo and
/// Hi. For example, if Op is a v8i32 that was split into two v4i32's, then
@@ -558,10 +559,10 @@ private:
void SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_LOAD(LoadSDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
+ void SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_UNDEF(SDNode *N, SDValue &Lo, SDValue &Hi);
- void SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N, SDValue &Lo,
+ void SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N, SDValue &Lo,
SDValue &Hi);
- void SplitVecRes_VSETCC(SDNode *N, SDValue &Lo, SDValue &Hi);
// Vector Operand Splitting: <128 x ty> -> 2 x <64 x ty>.
bool SplitVectorOperand(SDNode *N, unsigned OpNo);
@@ -641,7 +642,7 @@ private:
SDValue BasePtr, const Value *SV,
int SVOffset, unsigned Alignment,
bool isVolatile, unsigned LdWidth,
- MVT ResType, DebugLoc dl);
+ EVT ResType, DebugLoc dl);
/// Helper genWidenVectorStores - Helper function to generate a set of
/// stores to store a widen vector into non widen memory
@@ -664,7 +665,7 @@ private:
/// Modifies a vector input (widen or narrows) to a vector of NVT. The
/// input vector must have the same element type as NVT.
- SDValue ModifyToType(SDValue InOp, MVT WidenVT);
+ SDValue ModifyToType(SDValue InOp, EVT WidenVT);
//===--------------------------------------------------------------------===//
@@ -686,7 +687,7 @@ private:
/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
- void GetSplitDestVTs(MVT InVT, MVT &LoVT, MVT &HiVT);
+ void GetSplitDestVTs(EVT InVT, EVT &LoVT, EVT &HiVT);
/// GetPairElements - Use ISD::EXTRACT_ELEMENT nodes to extract the low and
/// high parts of the given value.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 6e5adee..0eafe62 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -11,9 +11,11 @@
// The routines here perform legalization when the details of the type (such as
// whether it is an integer or a float) do not matter.
// Expansion is the act of changing a computation in an illegal type to be a
-// computation in two identical registers of a smaller type.
+// computation in two identical registers of a smaller type. The Lo/Hi part
+// is required to be stored first in memory on little/big-endian machines.
// Splitting is the act of changing a computation in an illegal type to be a
// computation in two not necessarily identical registers of a smaller type.
+// There are no requirements on how the type is represented in memory.
//
//===----------------------------------------------------------------------===//
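The added sentence about memory layout can be checked directly: for an expanded i64, the Lo half is the piece found at the lower address on a little-endian machine. A small standalone check, assuming a little-endian host such as x86:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t V = 0x1122334455667788ULL;
      uint32_t Lo = uint32_t(V), Hi = uint32_t(V >> 32);

      uint8_t Mem[8];
      std::memcpy(Mem, &V, 8);             // how the i64 lives in memory

      uint32_t First;
      std::memcpy(&First, Mem, 4);         // the half at the lower address
      assert(First == Lo && First != Hi);  // little-endian: Lo is stored first
      return 0;
    }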
@@ -32,10 +34,10 @@ using namespace llvm;
void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- MVT OutVT = N->getValueType(0);
- MVT NOutVT = TLI.getTypeToTransformTo(OutVT);
+ EVT OutVT = N->getValueType(0);
+ EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
SDValue InOp = N->getOperand(0);
- MVT InVT = InOp.getValueType();
+ EVT InVT = InOp.getValueType();
DebugLoc dl = N->getDebugLoc();
// Handle some special cases efficiently.
@@ -59,16 +61,12 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
return;
case SplitVector:
- // Convert the split parts of the input if it was split in two.
GetSplitVector(InOp, Lo, Hi);
- if (Lo.getValueType() == Hi.getValueType()) {
- if (TLI.isBigEndian())
- std::swap(Lo, Hi);
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
- return;
- }
- break;
+ if (TLI.isBigEndian())
+ std::swap(Lo, Hi);
+ Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
+ Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
+ return;
case ScalarizeVector:
// Convert the element instead.
SplitInteger(BitConvertToInteger(GetScalarizedVector(InOp)), Lo, Hi);
@@ -78,7 +76,7 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
case WidenVector: {
assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BIT_CONVERT");
InOp = GetWidenedVector(InOp);
- MVT InNVT = MVT::getVectorVT(InVT.getVectorElementType(),
+ EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
InVT.getVectorNumElements()/2);
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
DAG.getIntPtrConstant(0));
@@ -95,7 +93,7 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
if (InVT.isVector() && OutVT.isInteger()) {
// Handle cases like i64 = BIT_CONVERT v1i64 on x86, where the operand
// is legal but the result is not.
- MVT NVT = MVT::getVectorVT(NOutVT, 2);
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), NOutVT, 2);
if (isTypeLegal(NVT)) {
SDValue CastInOp = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, InOp);
@@ -106,7 +104,7 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
if (TLI.isBigEndian())
std::swap(Lo, Hi);
-
+
return;
}
}
@@ -117,7 +115,7 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
// Create the stack frame object. Make sure it is aligned for both
// the source and expanded destination types.
unsigned Alignment =
- TLI.getTargetData()->getPrefTypeAlignment(NOutVT.getTypeForMVT());
+ TLI.getTargetData()->getPrefTypeAlignment(NOutVT.getTypeForEVT(*DAG.getContext()));
SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
const Value *SV = PseudoSourceValue::getFixedStack(SPFI);
@@ -169,11 +167,11 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Convert to a vector of the expanded element type, for example
// <3 x i64> -> <6 x i32>.
- MVT OldVT = N->getValueType(0);
- MVT NewVT = TLI.getTypeToTransformTo(OldVT);
+ EVT OldVT = N->getValueType(0);
+ EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl,
- MVT::getVectorVT(NewVT, 2*OldElts),
+ EVT::getVectorVT(*DAG.getContext(), NewVT, 2*OldElts),
OldVec);
// Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector.
@@ -200,7 +198,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
DebugLoc dl = N->getDebugLoc();
LoadSDNode *LD = cast<LoadSDNode>(N);
- MVT NVT = TLI.getTypeToTransformTo(LD->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
int SVOffset = LD->getSrcValueOffset();
@@ -235,7 +233,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
}
void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
- MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Chain = N->getOperand(0);
SDValue Ptr = N->getOperand(1);
DebugLoc dl = N->getDebugLoc();
@@ -265,8 +263,8 @@ SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
// instead, but only if the new vector type is legal (otherwise there
// is no point, and it might create expansion loops). For example, on
// x86 this turns v1i64 = BIT_CONVERT i64 into v1i64 = BIT_CONVERT v2i32.
- MVT OVT = N->getOperand(0).getValueType();
- MVT NVT = MVT::getVectorVT(TLI.getTypeToTransformTo(OVT), 2);
+ EVT OVT = N->getOperand(0).getValueType();
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(), TLI.getTypeToTransformTo(*DAG.getContext(), OVT), 2);
if (isTypeLegal(NVT)) {
SDValue Parts[2];
@@ -286,10 +284,10 @@ SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
// The vector type is legal but the element type needs expansion.
- MVT VecVT = N->getValueType(0);
+ EVT VecVT = N->getValueType(0);
unsigned NumElts = VecVT.getVectorNumElements();
- MVT OldVT = N->getOperand(0).getValueType();
- MVT NewVT = TLI.getTypeToTransformTo(OldVT);
+ EVT OldVT = N->getOperand(0).getValueType();
+ EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
DebugLoc dl = N->getDebugLoc();
assert(OldVT == VecVT.getVectorElementType() &&
@@ -310,7 +308,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
}
SDValue NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
- MVT::getVectorVT(NewVT, NewElts.size()),
+ EVT::getVectorVT(*DAG.getContext(), NewVT, NewElts.size()),
&NewElts[0], NewElts.size());
// Convert the new vector to the old vector type.
@@ -325,20 +323,20 @@ SDValue DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) {
SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
// The vector type is legal but the element type needs expansion.
- MVT VecVT = N->getValueType(0);
+ EVT VecVT = N->getValueType(0);
unsigned NumElts = VecVT.getVectorNumElements();
DebugLoc dl = N->getDebugLoc();
SDValue Val = N->getOperand(1);
- MVT OldEVT = Val.getValueType();
- MVT NewEVT = TLI.getTypeToTransformTo(OldEVT);
+ EVT OldEVT = Val.getValueType();
+ EVT NewEVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldEVT);
assert(OldEVT == VecVT.getVectorElementType() &&
"Inserted element type doesn't match vector element type!");
// Bitconvert to a vector of twice the length with elements of the expanded
// type, insert the expanded vector elements, and then convert back.
- MVT NewVecVT = MVT::getVectorVT(NewEVT, NumElts*2);
+ EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewEVT, NumElts*2);
SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl,
NewVecVT, N->getOperand(0));
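The index arithmetic behind the bitconvert trick: inserting one expanded element into the doubled-length vector means writing its two halves at positions 2*Idx and 2*Idx+1. A sketch on plain arrays; which position holds Lo and which holds Hi depends on endianness, and little-endian is assumed here:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint32_t Vec[4] = {0, 0, 0, 0};          // a v2i64 viewed as v4i32
      uint64_t Val = 0x0123456789abcdefULL;    // the expanded element to insert
      unsigned Idx = 1;                        // target element in the v2i64

      Vec[2 * Idx]     = uint32_t(Val);        // Lo half at 2*Idx
      Vec[2 * Idx + 1] = uint32_t(Val >> 32);  // Hi half at 2*Idx + 1

      uint64_t Elt;
      std::memcpy(&Elt, &Vec[2 * Idx], sizeof(Elt));  // read element 1 back
      assert(Elt == Val);                      // holds on a little-endian host
      return 0;
    }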
@@ -360,7 +358,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
SDValue DAGTypeLegalizer::ExpandOp_SCALAR_TO_VECTOR(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
assert(VT.getVectorElementType() == N->getOperand(0).getValueType() &&
"SCALAR_TO_VECTOR operand type doesn't match vector element type!");
unsigned NumElts = VT.getVectorNumElements();
@@ -378,7 +376,7 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
DebugLoc dl = N->getDebugLoc();
StoreSDNode *St = cast<StoreSDNode>(N);
- MVT NVT = TLI.getTypeToTransformTo(St->getValue().getValueType());
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), St->getValue().getValueType());
SDValue Chain = St->getChain();
SDValue Ptr = St->getBasePtr();
int SVOffset = St->getSrcValueOffset();
@@ -464,7 +462,7 @@ void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDValue &Lo,
}
void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDValue &Lo, SDValue &Hi) {
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
DebugLoc dl = N->getDebugLoc();
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
Lo = DAG.getUNDEF(LoVT);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 335c73c..ca19430 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -20,8 +20,8 @@
// type i8 which must be promoted.
//
// This does not legalize vector manipulations like ISD::BUILD_VECTOR,
-// or operations that happen to take a vector which are custom-lowered like
-// ISD::CALL; the legalization for such operations never produces nodes
+// or operations that happen to take a vector which are custom-lowered;
+// the legalization for such operations never produces nodes
// with illegal types, so it's okay to put off legalizing them until
// SelectionDAG::Legalize runs.
//
@@ -129,7 +129,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
if (!HasVectorValue)
return TranslateLegalizeResults(Op, Result);
- MVT QueryType;
+ EVT QueryType;
switch (Op.getOpcode()) {
default:
return TranslateLegalizeResults(Op, Result);
@@ -231,10 +231,10 @@ SDValue VectorLegalizer::PromoteVectorOp(SDValue Op) {
// Vector "promotion" is basically just bitcasting and doing the operation
// in a different type. For example, x86 promotes ISD::AND on v2i32 to
// v1i64.
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
assert(Op.getNode()->getNumValues() == 1 &&
"Can't promote a vector with multiple results!");
- MVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT);
+ EVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT);
DebugLoc dl = Op.getDebugLoc();
SmallVector<SDValue, 4> Operands(Op.getNumOperands());
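Why operating in the promoted type is safe for a bitwise op such as the AND example mentioned above: the bytes produced by the lane-wise operation and by the single wide operation are identical, so the surrounding bitcasts cost nothing. A self-contained check:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint32_t A[2] = {0xF0F0F0F0u, 0x12345678u};
      uint32_t B[2] = {0x0FF00FF0u, 0xFFFF0000u};

      uint32_t LaneWise[2] = {A[0] & B[0], A[1] & B[1]}; // AND on v2i32 lanes

      uint64_t A64, B64, R64;
      std::memcpy(&A64, A, 8);
      std::memcpy(&B64, B, 8);
      R64 = A64 & B64;                                   // the "promoted" AND

      assert(std::memcmp(&R64, LaneWise, 8) == 0);       // same bytes either way
      return 0;
    }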
@@ -260,11 +260,11 @@ SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {
}
SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
unsigned NumElems = VT.getVectorNumElements();
- MVT EltVT = VT.getVectorElementType();
+ EVT EltVT = VT.getVectorElementType();
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1), CC = Op.getOperand(2);
- MVT TmpEltVT = LHS.getValueType().getVectorElementType();
+ EVT TmpEltVT = LHS.getValueType().getVectorElementType();
DebugLoc dl = Op.getDebugLoc();
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
@@ -287,11 +287,11 @@ SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
/// the operation be expanded. "Unroll" the vector, splitting out the scalars
/// and operating on each element individually.
SDValue VectorLegalizer::UnrollVectorOp(SDValue Op) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
assert(Op.getNode()->getNumValues() == 1 &&
"Can't unroll a vector with multiple results!");
unsigned NE = VT.getVectorNumElements();
- MVT EltVT = VT.getVectorElementType();
+ EVT EltVT = VT.getVectorElementType();
DebugLoc dl = Op.getDebugLoc();
SmallVector<SDValue, 8> Scalars;
@@ -299,10 +299,10 @@ SDValue VectorLegalizer::UnrollVectorOp(SDValue Op) {
for (unsigned i = 0; i != NE; ++i) {
for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
SDValue Operand = Op.getOperand(j);
- MVT OperandVT = Operand.getValueType();
+ EVT OperandVT = Operand.getValueType();
if (OperandVT.isVector()) {
// A vector operand; extract a single element.
- MVT OperandEltVT = OperandVT.getVectorElementType();
+ EVT OperandEltVT = OperandVT.getVectorElementType();
Operands[j] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
OperandEltVT,
Operand,
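The shape of the unrolling on a concrete case: a v4i32 add becomes four extracts, four scalar adds, and a rebuild of the vector (BUILD_VECTOR in the DAG, a plain array here). A trivial sketch:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t LHS[4] = {1, 2, 3, 4};
      uint32_t RHS[4] = {10, 20, 30, 40};
      uint32_t Out[4];

      for (unsigned i = 0; i != 4; ++i)   // one scalar op per extracted element
        Out[i] = LHS[i] + RHS[i];

      assert(Out[0] == 11 && Out[3] == 44);
      return 0;
    }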
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 68967cc..a03f825 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -15,14 +15,16 @@
// eventually decomposes to scalars if the target doesn't support v4f32 or v2f32
// types.
// Splitting is the act of changing a computation in an invalid vector type to
-// be a computation in multiple vectors of a smaller type. For example,
-// implementing <128 x f32> operations in terms of two <64 x f32> operations.
+// be a computation in two vectors of half the size. For example, implementing
+// <128 x f32> operations in terms of two <64 x f32> operations.
//
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -30,18 +32,19 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
- DEBUG(cerr << "Scalarize node result " << ResNo << ": "; N->dump(&DAG);
- cerr << "\n");
+ DEBUG(errs() << "Scalarize node result " << ResNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n");
SDValue R = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "ScalarizeVectorResult #" << ResNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "ScalarizeVectorResult #" << ResNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n";
#endif
- assert(0 && "Do not know how to scalarize the result of this operator!");
- abort();
+ llvm_unreachable("Do not know how to scalarize the result of this operator!");
case ISD::BIT_CONVERT: R = ScalarizeVecRes_BIT_CONVERT(N); break;
case ISD::BUILD_VECTOR: R = N->getOperand(0); break;
@@ -53,6 +56,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SCALAR_TO_VECTOR: R = ScalarizeVecRes_SCALAR_TO_VECTOR(N); break;
case ISD::SELECT: R = ScalarizeVecRes_SELECT(N); break;
case ISD::SELECT_CC: R = ScalarizeVecRes_SELECT_CC(N); break;
+ case ISD::SETCC: R = ScalarizeVecRes_SETCC(N); break;
case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
case ISD::VECTOR_SHUFFLE: R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
case ISD::VSETCC: R = ScalarizeVecRes_VSETCC(N); break;
@@ -72,9 +76,14 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FCEIL:
case ISD::FRINT:
case ISD::FNEARBYINT:
+ case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP:
case ISD::TRUNCATE:
- case ISD::UINT_TO_FP: R = ScalarizeVecRes_UnaryOp(N); break;
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ R = ScalarizeVecRes_UnaryOp(N);
+ break;
case ISD::ADD:
case ISD::AND:
@@ -91,11 +100,12 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SUB:
case ISD::UDIV:
case ISD::UREM:
- case ISD::XOR: R = ScalarizeVecRes_BinOp(N); break;
-
+ case ISD::XOR:
case ISD::SHL:
case ISD::SRA:
- case ISD::SRL: R = ScalarizeVecRes_ShiftOp(N); break;
+ case ISD::SRL:
+ R = ScalarizeVecRes_BinOp(N);
+ break;
}
// If R is null, the sub-method took care of registering the result.
@@ -110,21 +120,14 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
LHS.getValueType(), LHS, RHS);
}
-SDValue DAGTypeLegalizer::ScalarizeVecRes_ShiftOp(SDNode *N) {
- SDValue LHS = GetScalarizedVector(N->getOperand(0));
- SDValue ShiftAmt = GetScalarizedVector(N->getOperand(1));
- return DAG.getNode(N->getOpcode(), N->getDebugLoc(),
- LHS.getValueType(), LHS, ShiftAmt);
-}
-
SDValue DAGTypeLegalizer::ScalarizeVecRes_BIT_CONVERT(SDNode *N) {
- MVT NewVT = N->getValueType(0).getVectorElementType();
+ EVT NewVT = N->getValueType(0).getVectorElementType();
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
NewVT, N->getOperand(0));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N) {
- MVT NewVT = N->getValueType(0).getVectorElementType();
+ EVT NewVT = N->getValueType(0).getVectorElementType();
SDValue Op0 = GetScalarizedVector(N->getOperand(0));
return DAG.getConvertRndSat(NewVT, N->getDebugLoc(),
Op0, DAG.getValueType(NewVT),
@@ -150,7 +153,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
// The value to insert may have a wider type than the vector element type,
// so be sure to truncate it to the element type if necessary.
SDValue Op = N->getOperand(1);
- MVT EltVT = N->getValueType(0).getVectorElementType();
+ EVT EltVT = N->getValueType(0).getVectorElementType();
if (Op.getValueType() != EltVT)
// FIXME: Can this happen for floating point types?
Op = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), EltVT, Op);
@@ -167,7 +170,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
DAG.getUNDEF(N->getBasePtr().getValueType()),
N->getSrcValue(), N->getSrcValueOffset(),
N->getMemoryVT().getVectorElementType(),
- N->isVolatile(), N->getAlignment());
+ N->isVolatile(), N->getOriginalAlignment());
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
@@ -177,7 +180,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) {
// Get the dest type - it doesn't always match the input type, e.g. int_to_fp.
- MVT DestVT = N->getValueType(0).getVectorElementType();
+ EVT DestVT = N->getValueType(0).getVectorElementType();
SDValue Op = GetScalarizedVector(N->getOperand(0));
return DAG.getNode(N->getOpcode(), N->getDebugLoc(), DestVT, Op);
}
@@ -185,7 +188,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) {
SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
// If the operand is wider than the vector element type then it is implicitly
// truncated. Make that explicit here.
- MVT EltVT = N->getValueType(0).getVectorElementType();
+ EVT EltVT = N->getValueType(0).getVectorElementType();
SDValue InOp = N->getOperand(0);
if (InOp.getValueType() != EltVT)
return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), EltVT, InOp);
@@ -207,6 +210,15 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT_CC(SDNode *N) {
N->getOperand(4));
}
+SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) {
+ SDValue LHS = GetScalarizedVector(N->getOperand(0));
+ SDValue RHS = GetScalarizedVector(N->getOperand(1));
+ DebugLoc DL = N->getDebugLoc();
+
+ // Turn it into a scalar SETCC.
+ return DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, N->getOperand(2));
+}
+
SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) {
return DAG.getUNDEF(N->getValueType(0).getVectorElementType());
}
@@ -223,12 +235,12 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) {
SDValue LHS = GetScalarizedVector(N->getOperand(0));
SDValue RHS = GetScalarizedVector(N->getOperand(1));
- MVT NVT = N->getValueType(0).getVectorElementType();
- MVT SVT = TLI.getSetCCResultType(LHS.getValueType());
- DebugLoc dl = N->getDebugLoc();
+ EVT NVT = N->getValueType(0).getVectorElementType();
+ EVT SVT = TLI.getSetCCResultType(LHS.getValueType());
+ DebugLoc DL = N->getDebugLoc();
// Turn it into a scalar SETCC.
- SDValue Res = DAG.getNode(ISD::SETCC, dl, SVT, LHS, RHS, N->getOperand(2));
+ SDValue Res = DAG.getNode(ISD::SETCC, DL, SVT, LHS, RHS, N->getOperand(2));
// VSETCC always returns a sign-extended value, while SETCC may not. The
// SETCC result type may not match the vector element type. Correct these.
@@ -237,19 +249,19 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) {
// Ensure the SETCC result is sign-extended.
if (TLI.getBooleanContents() !=
TargetLowering::ZeroOrNegativeOneBooleanContent)
- Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, SVT, Res,
+ Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, SVT, Res,
DAG.getValueType(MVT::i1));
// Truncate to the final type.
- return DAG.getNode(ISD::TRUNCATE, dl, NVT, Res);
- } else {
- // The SETCC result type is smaller than the vector element type.
- // If the SetCC result is not sign-extended, chop it down to MVT::i1.
- if (TLI.getBooleanContents() !=
- TargetLowering::ZeroOrNegativeOneBooleanContent)
- Res = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Res);
- // Sign extend to the final type.
- return DAG.getNode(ISD::SIGN_EXTEND, dl, NVT, Res);
+ return DAG.getNode(ISD::TRUNCATE, DL, NVT, Res);
}
+
+ // The SETCC result type is smaller than the vector element type.
+ // If the SetCC result is not sign-extended, chop it down to MVT::i1.
+ if (TLI.getBooleanContents() !=
+ TargetLowering::ZeroOrNegativeOneBooleanContent)
+ Res = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Res);
+ // Sign extend to the final type.
+ return DAG.getNode(ISD::SIGN_EXTEND, DL, NVT, Res);
}
@@ -258,31 +270,32 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) {
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
- DEBUG(cerr << "Scalarize node operand " << OpNo << ": "; N->dump(&DAG);
- cerr << "\n");
+ DEBUG(errs() << "Scalarize node operand " << OpNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n");
SDValue Res = SDValue();
if (Res.getNode() == 0) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "ScalarizeVectorOperand Op #" << OpNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "ScalarizeVectorOperand Op #" << OpNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n";
#endif
- assert(0 && "Do not know how to scalarize this operator's operand!");
- abort();
-
+ llvm_unreachable("Do not know how to scalarize this operator's operand!");
case ISD::BIT_CONVERT:
- Res = ScalarizeVecOp_BIT_CONVERT(N); break;
-
+ Res = ScalarizeVecOp_BIT_CONVERT(N);
+ break;
case ISD::CONCAT_VECTORS:
- Res = ScalarizeVecOp_CONCAT_VECTORS(N); break;
-
+ Res = ScalarizeVecOp_CONCAT_VECTORS(N);
+ break;
case ISD::EXTRACT_VECTOR_ELT:
- Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N); break;
-
+ Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N);
+ break;
case ISD::STORE:
- Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo); break;
+ Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo);
+ break;
}
}
@@ -323,7 +336,11 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_CONCAT_VECTORS(SDNode *N) {
/// be scalarized, it must be <1 x ty>, so just return the element, ignoring the
/// index.
SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
- return GetScalarizedVector(N->getOperand(0));
+ SDValue Res = GetScalarizedVector(N->getOperand(0));
+ if (Res.getValueType() != N->getValueType(0))
+ Res = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), N->getValueType(0),
+ Res);
+ return Res;
}
/// ScalarizeVecOp_STORE - If the value to store is a vector that needs to be
@@ -343,7 +360,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
return DAG.getStore(N->getChain(), dl, GetScalarizedVector(N->getOperand(1)),
N->getBasePtr(), N->getSrcValue(), N->getSrcValueOffset(),
- N->isVolatile(), N->getAlignment());
+ N->isVolatile(), N->getOriginalAlignment());
}
@@ -357,17 +374,19 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
/// legalization, we just know that (at least) one result needs vector
/// splitting.
void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
- DEBUG(cerr << "Split node result: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Split node result: ";
+ N->dump(&DAG);
+ errs() << "\n");
SDValue Lo, Hi;
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "SplitVectorResult #" << ResNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "SplitVectorResult #" << ResNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n";
#endif
- assert(0 && "Do not know how to split the result of this operator!");
- abort();
+ llvm_unreachable("Do not know how to split the result of this operator!");
case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, Lo, Hi); break;
case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
@@ -382,10 +401,16 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FPOWI: SplitVecRes_FPOWI(N, Lo, Hi); break;
case ISD::INSERT_VECTOR_ELT: SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
case ISD::SCALAR_TO_VECTOR: SplitVecRes_SCALAR_TO_VECTOR(N, Lo, Hi); break;
- case ISD::LOAD: SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);break;
+ case ISD::LOAD:
+ SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
+ break;
+ case ISD::SETCC:
+ case ISD::VSETCC:
+ SplitVecRes_SETCC(N, Lo, Hi);
+ break;
case ISD::VECTOR_SHUFFLE:
- SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi); break;
- case ISD::VSETCC: SplitVecRes_VSETCC(N, Lo, Hi); break;
+ SplitVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N), Lo, Hi);
+ break;
case ISD::CTTZ:
case ISD::CTLZ:
@@ -403,8 +428,13 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
case ISD::TRUNCATE:
- case ISD::UINT_TO_FP: SplitVecRes_UnaryOp(N, Lo, Hi); break;
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ SplitVecRes_UnaryOp(N, Lo, Hi);
+ break;
case ISD::ADD:
case ISD::SUB:
@@ -424,7 +454,9 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SRL:
case ISD::UREM:
case ISD::SREM:
- case ISD::FREM: SplitVecRes_BinOp(N, Lo, Hi); break;
+ case ISD::FREM:
+ SplitVecRes_BinOp(N, Lo, Hi);
+ break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -448,12 +480,12 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
// We know the result is a vector. The input may be either a vector or a
// scalar value.
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
DebugLoc dl = N->getDebugLoc();
SDValue InOp = N->getOperand(0);
- MVT InVT = InOp.getValueType();
+ EVT InVT = InOp.getValueType();
// Handle some special cases efficiently.
switch (getTypeAction(InVT)) {
@@ -488,8 +520,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
}
// In the general case, convert the input to an integer and split it by hand.
- MVT LoIntVT = MVT::getIntegerVT(LoVT.getSizeInBits());
- MVT HiIntVT = MVT::getIntegerVT(HiVT.getSizeInBits());
+ EVT LoIntVT = EVT::getIntegerVT(*DAG.getContext(), LoVT.getSizeInBits());
+ EVT HiIntVT = EVT::getIntegerVT(*DAG.getContext(), HiVT.getSizeInBits());
if (TLI.isBigEndian())
std::swap(LoIntVT, HiIntVT);
@@ -503,7 +535,7 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
DebugLoc dl = N->getDebugLoc();
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
unsigned LoNumElts = LoVT.getVectorNumElements();
@@ -525,7 +557,7 @@ void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo,
return;
}
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors);
@@ -537,7 +569,7 @@ void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::SplitVecRes_CONVERT_RNDSAT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
DebugLoc dl = N->getDebugLoc();
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
@@ -550,12 +582,11 @@ void DAGTypeLegalizer::SplitVecRes_CONVERT_RNDSAT(SDNode *N, SDValue &Lo,
// Split the input.
SDValue VLo, VHi;
- MVT InVT = N->getOperand(0).getValueType();
+ EVT InVT = N->getOperand(0).getValueType();
switch (getTypeAction(InVT)) {
- default: assert(0 && "Unexpected type action!");
+ default: llvm_unreachable("Unexpected type action!");
case Legal: {
- assert(LoVT == HiVT && "Legal non-power-of-two vector type?");
- MVT InNVT = MVT::getVectorVT(InVT.getVectorElementType(),
+ EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
LoVT.getVectorNumElements());
VLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, N->getOperand(0),
DAG.getIntPtrConstant(0));
@@ -570,9 +601,8 @@ void DAGTypeLegalizer::SplitVecRes_CONVERT_RNDSAT(SDNode *N, SDValue &Lo,
// If the result needs to be split and the input needs to be widened,
// the two types must have different lengths. Use the widened result
// and extract from it to do the split.
- assert(LoVT == HiVT && "Legal non-power-of-two vector type?");
SDValue InOp = GetWidenedVector(N->getOperand(0));
- MVT InNVT = MVT::getVectorVT(InVT.getVectorElementType(),
+ EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
LoVT.getVectorNumElements());
VLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
DAG.getIntPtrConstant(0));
@@ -595,14 +625,11 @@ void DAGTypeLegalizer::SplitVecRes_EXTRACT_SUBVECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Vec = N->getOperand(0);
SDValue Idx = N->getOperand(1);
- MVT IdxVT = Idx.getValueType();
+ EVT IdxVT = Idx.getValueType();
DebugLoc dl = N->getDebugLoc();
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
- // The indices are not guaranteed to be a multiple of the new vector
- // size unless the original vector type was split in two.
- assert(LoVT == HiVT && "Non power-of-two vectors not supported!");
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, LoVT, Vec, Idx);
Idx = DAG.getNode(ISD::ADD, dl, IdxVT, Idx,
@@ -639,8 +666,8 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
}
// Spill the vector to the stack.
- MVT VecVT = Vec.getValueType();
- MVT EltVT = VecVT.getVectorElementType();
+ EVT VecVT = Vec.getValueType();
+ EVT EltVT = VecVT.getVectorElementType();
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, NULL, 0);
@@ -648,7 +675,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// so use a truncating store.
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
unsigned Alignment =
- TLI.getTargetData()->getPrefTypeAlignment(VecVT.getTypeForMVT());
+ TLI.getTargetData()->getPrefTypeAlignment(VecVT.getTypeForEVT(*DAG.getContext()));
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, NULL, 0, EltVT);
// Load the Lo part from the stack slot.
@@ -666,7 +693,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
DebugLoc dl = N->getDebugLoc();
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
Lo = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoVT, N->getOperand(0));
@@ -676,7 +703,7 @@ void DAGTypeLegalizer::SplitVecRes_SCALAR_TO_VECTOR(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
SDValue &Hi) {
assert(ISD::isUNINDEXEDLoad(LD) && "Indexed load during type legalization!");
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
DebugLoc dl = LD->getDebugLoc();
GetSplitDestVTs(LD->getValueType(0), LoVT, HiVT);
@@ -686,11 +713,11 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
const Value *SV = LD->getSrcValue();
int SVOffset = LD->getSrcValueOffset();
- MVT MemoryVT = LD->getMemoryVT();
- unsigned Alignment = LD->getAlignment();
+ EVT MemoryVT = LD->getMemoryVT();
+ unsigned Alignment = LD->getOriginalAlignment();
bool isVolatile = LD->isVolatile();
- MVT LoMemVT, HiMemVT;
+ EVT LoMemVT, HiMemVT;
GetSplitDestVTs(MemoryVT, LoMemVT, HiMemVT);
Lo = DAG.getLoad(ISD::UNINDEXED, dl, ExtType, LoVT, Ch, Ptr, Offset,
@@ -700,7 +727,6 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
SVOffset += IncrementSize;
- Alignment = MinAlign(Alignment, IncrementSize);
Hi = DAG.getLoad(ISD::UNINDEXED, dl, ExtType, HiVT, Ch, Ptr, Offset,
SV, SVOffset, HiMemVT, isVolatile, Alignment);
@@ -714,20 +740,43 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
ReplaceValueWith(SDValue(LD, 1), Ch);
}
+void DAGTypeLegalizer::SplitVecRes_SETCC(SDNode *N, SDValue &Lo, SDValue &Hi) {
+ EVT LoVT, HiVT;
+ DebugLoc DL = N->getDebugLoc();
+ GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
+
+ // Split the input.
+ EVT InVT = N->getOperand(0).getValueType();
+ SDValue LL, LH, RL, RH;
+ EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
+ LoVT.getVectorNumElements());
+ LL = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, N->getOperand(0),
+ DAG.getIntPtrConstant(0));
+ LH = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, N->getOperand(0),
+ DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+
+ RL = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, N->getOperand(1),
+ DAG.getIntPtrConstant(0));
+ RH = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, N->getOperand(1),
+ DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
+
+ Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2));
+ Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2));
+}
+
void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
SDValue &Hi) {
// Get the dest types - they may not match the input types, e.g. int_to_fp.
- MVT LoVT, HiVT;
+ EVT LoVT, HiVT;
DebugLoc dl = N->getDebugLoc();
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
// Split the input.
- MVT InVT = N->getOperand(0).getValueType();
+ EVT InVT = N->getOperand(0).getValueType();
switch (getTypeAction(InVT)) {
- default: assert(0 && "Unexpected type action!");
+ default: llvm_unreachable("Unexpected type action!");
case Legal: {
- assert(LoVT == HiVT && "Legal non-power-of-two vector type?");
- MVT InNVT = MVT::getVectorVT(InVT.getVectorElementType(),
+ EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
LoVT.getVectorNumElements());
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, N->getOperand(0),
DAG.getIntPtrConstant(0));
@@ -742,9 +791,8 @@ void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo,
// If the result needs to be split and the input needs to be widened,
// the two types must have different lengths. Use the widened result
// and extract from it to do the split.
- assert(LoVT == HiVT && "Legal non-power-of-two vector type?");
SDValue InOp = GetWidenedVector(N->getOperand(0));
- MVT InNVT = MVT::getVectorVT(InVT.getVectorElementType(),
+ EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
LoVT.getVectorNumElements());
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InNVT, InOp,
DAG.getIntPtrConstant(0));
@@ -765,10 +813,8 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
DebugLoc dl = N->getDebugLoc();
GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
- MVT NewVT = Inputs[0].getValueType();
+ EVT NewVT = Inputs[0].getValueType();
unsigned NewElts = NewVT.getVectorNumElements();
- assert(NewVT == Inputs[1].getValueType() &&
- "Non power-of-two vectors not supported!");
// If Lo or Hi uses elements from at most two of the four input vectors, then
// express it as a vector shuffle of those two inputs. Otherwise extract the
@@ -825,7 +871,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
}
if (useBuildVector) {
- MVT EltVT = NewVT.getVectorElementType();
+ EVT EltVT = NewVT.getVectorElementType();
SmallVector<SDValue, 16> SVOps;
// Extract the input elements by hand.
@@ -868,20 +914,6 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
}
}
-void DAGTypeLegalizer::SplitVecRes_VSETCC(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
- MVT LoVT, HiVT;
- DebugLoc dl = N->getDebugLoc();
- GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
-
- SDValue LL, LH, RL, RH;
- GetSplitVector(N->getOperand(0), LL, LH);
- GetSplitVector(N->getOperand(1), RL, RH);
-
- Lo = DAG.getNode(ISD::VSETCC, dl, LoVT, LL, RL, N->getOperand(2));
- Hi = DAG.getNode(ISD::VSETCC, dl, HiVT, LH, RH, N->getOperand(2));
-}
-
//===----------------------------------------------------------------------===//
// Operand Vector Splitting
@@ -892,24 +924,27 @@ void DAGTypeLegalizer::SplitVecRes_VSETCC(SDNode *N, SDValue &Lo,
/// result types of the node are known to be legal, but other operands of the
/// node may need legalization as well as the specified one.
bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
- DEBUG(cerr << "Split node operand: "; N->dump(&DAG); cerr << "\n");
+ DEBUG(errs() << "Split node operand: ";
+ N->dump(&DAG);
+ errs() << "\n");
SDValue Res = SDValue();
if (Res.getNode() == 0) {
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "SplitVectorOperand Op #" << OpNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "SplitVectorOperand Op #" << OpNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n";
#endif
- assert(0 && "Do not know how to split this operator's operand!");
- abort();
+ llvm_unreachable("Do not know how to split this operator's operand!");
case ISD::BIT_CONVERT: Res = SplitVecOp_BIT_CONVERT(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
- case ISD::STORE: Res = SplitVecOp_STORE(cast<StoreSDNode>(N),
- OpNo); break;
+ case ISD::STORE:
+ Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
+ break;
case ISD::CTTZ:
case ISD::CTLZ:
@@ -917,8 +952,13 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
case ISD::TRUNCATE:
- case ISD::UINT_TO_FP: Res = SplitVecOp_UnaryOp(N); break;
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ Res = SplitVecOp_UnaryOp(N);
+ break;
}
}
@@ -939,15 +979,13 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
// The result has a legal vector type, but the input needs splitting.
- MVT ResVT = N->getValueType(0);
+ EVT ResVT = N->getValueType(0);
SDValue Lo, Hi;
DebugLoc dl = N->getDebugLoc();
GetSplitVector(N->getOperand(0), Lo, Hi);
- assert(Lo.getValueType() == Hi.getValueType() &&
- "Returns legal non-power-of-two vector type?");
- MVT InVT = Lo.getValueType();
+ EVT InVT = Lo.getValueType();
- MVT OutVT = MVT::getVectorVT(ResVT.getVectorElementType(),
+ EVT OutVT = EVT::getVectorVT(*DAG.getContext(), ResVT.getVectorElementType(),
InVT.getVectorNumElements());
Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo);
@@ -975,7 +1013,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) {
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
// We know that the extracted result type is legal. For now, assume the index
// is a constant.
- MVT SubVT = N->getValueType(0);
+ EVT SubVT = N->getValueType(0);
SDValue Idx = N->getOperand(1);
DebugLoc dl = N->getDebugLoc();
SDValue Lo, Hi;
@@ -997,7 +1035,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDValue Vec = N->getOperand(0);
SDValue Idx = N->getOperand(1);
- MVT VecVT = Vec.getValueType();
+ EVT VecVT = Vec.getValueType();
if (isa<ConstantSDNode>(Idx)) {
uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
@@ -1010,14 +1048,13 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
if (IdxVal < LoElts)
return DAG.UpdateNodeOperands(SDValue(N, 0), Lo, Idx);
- else
- return DAG.UpdateNodeOperands(SDValue(N, 0), Hi,
- DAG.getConstant(IdxVal - LoElts,
- Idx.getValueType()));
+ return DAG.UpdateNodeOperands(SDValue(N, 0), Hi,
+ DAG.getConstant(IdxVal - LoElts,
+ Idx.getValueType()));
}
// Store the vector to the stack.
- MVT EltVT = VecVT.getVectorElementType();
+ EVT EltVT = VecVT.getVectorElementType();
DebugLoc dl = N->getDebugLoc();
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
@@ -1026,7 +1063,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Load back the required element.
StackPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
- return DAG.getLoad(EltVT, dl, Store, StackPtr, SV, 0);
+ return DAG.getExtLoad(ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
+ SV, 0, EltVT);
}
SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
@@ -1038,13 +1076,13 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
SDValue Ch = N->getChain();
SDValue Ptr = N->getBasePtr();
int SVOffset = N->getSrcValueOffset();
- MVT MemoryVT = N->getMemoryVT();
- unsigned Alignment = N->getAlignment();
+ EVT MemoryVT = N->getMemoryVT();
+ unsigned Alignment = N->getOriginalAlignment();
bool isVol = N->isVolatile();
SDValue Lo, Hi;
GetSplitVector(N->getOperand(1), Lo, Hi);
- MVT LoMemVT, HiMemVT;
+ EVT LoMemVT, HiMemVT;
GetSplitDestVTs(MemoryVT, LoMemVT, HiMemVT);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
@@ -1059,15 +1097,14 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
// Increment the pointer to the other half.
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
+ SVOffset += IncrementSize;
if (isTruncating)
- Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr,
- N->getSrcValue(), SVOffset+IncrementSize,
- HiMemVT,
- isVol, MinAlign(Alignment, IncrementSize));
+ Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr, N->getSrcValue(), SVOffset,
+ HiMemVT, isVol, Alignment);
else
- Hi = DAG.getStore(Ch, dl, Hi, Ptr, N->getSrcValue(), SVOffset+IncrementSize,
- isVol, MinAlign(Alignment, IncrementSize));
+ Hi = DAG.getStore(Ch, dl, Hi, Ptr, N->getSrcValue(), SVOffset,
+ isVol, Alignment);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
@@ -1078,18 +1115,19 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
//===----------------------------------------------------------------------===//
void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
- DEBUG(cerr << "Widen node result " << ResNo << ": "; N->dump(&DAG);
- cerr << "\n");
+ DEBUG(errs() << "Widen node result " << ResNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n");
SDValue Res = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "WidenVectorResult #" << ResNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "WidenVectorResult #" << ResNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n";
#endif
- assert(0 && "Do not know how to widen the result of this operator!");
- abort();
+ llvm_unreachable("Do not know how to widen the result of this operator!");
case ISD::BIT_CONVERT: Res = WidenVecRes_BIT_CONVERT(N); break;
case ISD::BUILD_VECTOR: Res = WidenVecRes_BUILD_VECTOR(N); break;
@@ -1102,9 +1140,12 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT: Res = WidenVecRes_SELECT(N); break;
case ISD::SELECT_CC: Res = WidenVecRes_SELECT_CC(N); break;
case ISD::UNDEF: Res = WidenVecRes_UNDEF(N); break;
- case ISD::VECTOR_SHUFFLE:
- Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N)); break;
- case ISD::VSETCC: Res = WidenVecRes_VSETCC(N); break;
+ case ISD::VECTOR_SHUFFLE:
+ Res = WidenVecRes_VECTOR_SHUFFLE(cast<ShuffleVectorSDNode>(N));
+ break;
+ case ISD::VSETCC:
+ Res = WidenVecRes_VSETCC(N);
+ break;
case ISD::ADD:
case ISD::AND:
@@ -1126,21 +1167,27 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::UDIV:
case ISD::UREM:
case ISD::SUB:
- case ISD::XOR: Res = WidenVecRes_Binary(N); break;
+ case ISD::XOR:
+ Res = WidenVecRes_Binary(N);
+ break;
case ISD::SHL:
case ISD::SRA:
- case ISD::SRL: Res = WidenVecRes_Shift(N); break;
+ case ISD::SRL:
+ Res = WidenVecRes_Shift(N);
+ break;
- case ISD::ANY_EXTEND:
case ISD::FP_ROUND:
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
- case ISD::SIGN_EXTEND:
case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
case ISD::TRUNCATE:
+ case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
- case ISD::UINT_TO_FP: Res = WidenVecRes_Convert(N); break;
+ case ISD::ANY_EXTEND:
+ Res = WidenVecRes_Convert(N);
+ break;
case ISD::CTLZ:
case ISD::CTPOP:
@@ -1149,7 +1196,9 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FCOS:
case ISD::FNEG:
case ISD::FSIN:
- case ISD::FSQRT: Res = WidenVecRes_Unary(N); break;
+ case ISD::FSQRT:
+ Res = WidenVecRes_Unary(N);
+ break;
}
// If Res is null, the sub-method took care of registering the result.
@@ -1159,7 +1208,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
// Binary op widening.
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
return DAG.getNode(N->getOpcode(), N->getDebugLoc(), WidenVT, InOp1, InOp2);
@@ -1169,12 +1218,12 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
SDValue InOp = N->getOperand(0);
DebugLoc dl = N->getDebugLoc();
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
- MVT InVT = InOp.getValueType();
- MVT InEltVT = InVT.getVectorElementType();
- MVT InWidenVT = MVT::getVectorVT(InEltVT, WidenNumElts);
+ EVT InVT = InOp.getValueType();
+ EVT InEltVT = InVT.getVectorElementType();
+ EVT InWidenVT = EVT::getVectorVT(*DAG.getContext(), InEltVT, WidenNumElts);
unsigned Opcode = N->getOpcode();
unsigned InVTNumElts = InVT.getVectorNumElements();
@@ -1216,7 +1265,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
// Otherwise unroll into some nasty scalar code and rebuild the vector.
SmallVector<SDValue, 16> Ops(WidenNumElts);
- MVT EltVT = WidenVT.getVectorElementType();
+ EVT EltVT = WidenVT.getVectorElementType();
unsigned MinElts = std::min(InVTNumElts, WidenNumElts);
unsigned i;
for (i=0; i < MinElts; ++i)
@@ -1232,16 +1281,16 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
}
SDValue DAGTypeLegalizer::WidenVecRes_Shift(SDNode *N) {
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp = GetWidenedVector(N->getOperand(0));
SDValue ShOp = N->getOperand(1);
- MVT ShVT = ShOp.getValueType();
+ EVT ShVT = ShOp.getValueType();
if (getTypeAction(ShVT) == WidenVector) {
ShOp = GetWidenedVector(ShOp);
ShVT = ShOp.getValueType();
}
- MVT ShWidenVT = MVT::getVectorVT(ShVT.getVectorElementType(),
+ EVT ShWidenVT = EVT::getVectorVT(*DAG.getContext(), ShVT.getVectorElementType(),
WidenVT.getVectorNumElements());
if (ShVT != ShWidenVT)
ShOp = ModifyToType(ShOp, ShWidenVT);
@@ -1251,16 +1300,16 @@ SDValue DAGTypeLegalizer::WidenVecRes_Shift(SDNode *N) {
SDValue DAGTypeLegalizer::WidenVecRes_Unary(SDNode *N) {
// Unary op widening.
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp = GetWidenedVector(N->getOperand(0));
return DAG.getNode(N->getOpcode(), N->getDebugLoc(), WidenVT, InOp);
}
SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
SDValue InOp = N->getOperand(0);
- MVT InVT = InOp.getValueType();
- MVT VT = N->getValueType(0);
- MVT WidenVT = TLI.getTypeToTransformTo(VT);
+ EVT InVT = InOp.getValueType();
+ EVT VT = N->getValueType(0);
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
DebugLoc dl = N->getDebugLoc();
switch (getTypeAction(InVT)) {
@@ -1300,13 +1349,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
// Determine new input vector type. The new input vector type will use
// the same element type (if it is a vector) or use the input type as a
// vector. It is the same size as the type to widen to.
- MVT NewInVT;
+ EVT NewInVT;
unsigned NewNumElts = WidenSize / InSize;
if (InVT.isVector()) {
- MVT InEltVT = InVT.getVectorElementType();
- NewInVT= MVT::getVectorVT(InEltVT, WidenSize / InEltVT.getSizeInBits());
+ EVT InEltVT = InVT.getVectorElementType();
+ NewInVT= EVT::getVectorVT(*DAG.getContext(), InEltVT, WidenSize / InEltVT.getSizeInBits());
} else {
- NewInVT = MVT::getVectorVT(InVT, NewNumElts);
+ NewInVT = EVT::getVectorVT(*DAG.getContext(), InVT, NewNumElts);
}
if (TLI.isTypeLegal(NewInVT)) {
@@ -1332,28 +1381,17 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
}
}
- // This should occur rarely. Lower the bit-convert to a store/load
- // from the stack. Create the stack frame object. Make sure it is aligned
- // for both the source and destination types.
- SDValue FIPtr = DAG.CreateStackTemporary(InVT, WidenVT);
- int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
- const Value *SV = PseudoSourceValue::getFixedStack(FI);
-
- // Emit a store to the stack slot.
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, InOp, FIPtr, SV, 0);
-
- // Result is a load from the stack slot.
- return DAG.getLoad(WidenVT, dl, Store, FIPtr, SV, 0);
+ return CreateStackStoreLoad(InOp, WidenVT);
}
SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
// Build a vector with undefined for the new nodes.
- MVT VT = N->getValueType(0);
- MVT EltVT = VT.getVectorElementType();
+ EVT VT = N->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
unsigned NumElts = VT.getVectorNumElements();
- MVT WidenVT = TLI.getTypeToTransformTo(VT);
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SmallVector<SDValue, 16> NewOps(N->op_begin(), N->op_end());
@@ -1365,8 +1403,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_BUILD_VECTOR(SDNode *N) {
}
SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
- MVT InVT = N->getOperand(0).getValueType();
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT InVT = N->getOperand(0).getValueType();
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
DebugLoc dl = N->getDebugLoc();
unsigned WidenNumElts = WidenVT.getVectorNumElements();
unsigned NumOperands = N->getNumOperands();
@@ -1387,7 +1425,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
}
} else {
InputWidened = true;
- if (WidenVT == TLI.getTypeToTransformTo(InVT)) {
+ if (WidenVT == TLI.getTypeToTransformTo(*DAG.getContext(), InVT)) {
// The inputs and the result are widened to the same type.
unsigned i;
for (i=1; i < NumOperands; ++i)
@@ -1406,7 +1444,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
MaskOps[i] = i;
MaskOps[i+WidenNumElts/2] = i+WidenNumElts;
}
- return DAG.getVectorShuffle(WidenVT, dl,
+ return DAG.getVectorShuffle(WidenVT, dl,
GetWidenedVector(N->getOperand(0)),
GetWidenedVector(N->getOperand(1)),
&MaskOps[0]);
@@ -1415,7 +1453,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONCAT_VECTORS(SDNode *N) {
}
// Fall back to using extracts and a build vector.
- MVT EltVT = WidenVT.getVectorElementType();
+ EVT EltVT = WidenVT.getVectorElementType();
unsigned NumInElts = InVT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(WidenNumElts);
unsigned Idx = 0;
@@ -1439,12 +1477,12 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
SDValue RndOp = N->getOperand(3);
SDValue SatOp = N->getOperand(4);
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
- MVT InVT = InOp.getValueType();
- MVT InEltVT = InVT.getVectorElementType();
- MVT InWidenVT = MVT::getVectorVT(InEltVT, WidenNumElts);
+ EVT InVT = InOp.getValueType();
+ EVT InEltVT = InVT.getVectorElementType();
+ EVT InWidenVT = EVT::getVectorVT(*DAG.getContext(), InEltVT, WidenNumElts);
SDValue DTyOp = DAG.getValueType(WidenVT);
SDValue STyOp = DAG.getValueType(InWidenVT);
@@ -1491,7 +1529,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
// Otherwise unroll into some nasty scalar code and rebuild the vector.
SmallVector<SDValue, 16> Ops(WidenNumElts);
- MVT EltVT = WidenVT.getVectorElementType();
+ EVT EltVT = WidenVT.getVectorElementType();
DTyOp = DAG.getValueType(EltVT);
STyOp = DAG.getValueType(InEltVT);
@@ -1512,8 +1550,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
}
SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
- MVT VT = N->getValueType(0);
- MVT WidenVT = TLI.getTypeToTransformTo(VT);
+ EVT VT = N->getValueType(0);
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SDValue InOp = N->getOperand(0);
SDValue Idx = N->getOperand(1);
@@ -1522,7 +1560,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
if (getTypeAction(InOp.getValueType()) == WidenVector)
InOp = GetWidenedVector(InOp);
- MVT InVT = InOp.getValueType();
+ EVT InVT = InOp.getValueType();
ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx);
if (CIdx) {
@@ -1540,8 +1578,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_EXTRACT_SUBVECTOR(SDNode *N) {
// We could try widening the input to the right length but for now, extract
// the original elements, fill the rest with undefs and build a vector.
SmallVector<SDValue, 16> Ops(WidenNumElts);
- MVT EltVT = VT.getVectorElementType();
- MVT IdxVT = Idx.getValueType();
+ EVT EltVT = VT.getVectorElementType();
+ EVT IdxVT = Idx.getValueType();
unsigned NumElts = VT.getVectorNumElements();
unsigned i;
if (CIdx) {
@@ -1573,8 +1611,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
LoadSDNode *LD = cast<LoadSDNode>(N);
- MVT WidenVT = TLI.getTypeToTransformTo(LD->getValueType(0));
- MVT LdVT = LD->getMemoryVT();
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
+ EVT LdVT = LD->getMemoryVT();
DebugLoc dl = N->getDebugLoc();
assert(LdVT.isVector() && WidenVT.isVector());
@@ -1593,8 +1631,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
// For extension loads, we cannot play the trick of chopping legal
// vector types and bit-casting to the right type. Instead, we unroll
// the load and build a vector.
- MVT EltVT = WidenVT.getVectorElementType();
- MVT LdEltVT = LdVT.getVectorElementType();
+ EVT EltVT = WidenVT.getVectorElementType();
+ EVT LdEltVT = LdVT.getVectorElementType();
unsigned NumElts = LdVT.getVectorNumElements();
// Load each element and widen
@@ -1638,26 +1676,26 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
// Modified the chain - switch anything that used the old chain to use
// the new one.
- ReplaceValueWith(SDValue(N, 1), Chain);
+ ReplaceValueWith(SDValue(N, 1), NewChain);
return Result;
}
SDValue DAGTypeLegalizer::WidenVecRes_SCALAR_TO_VECTOR(SDNode *N) {
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getNode(ISD::SCALAR_TO_VECTOR, N->getDebugLoc(),
WidenVT, N->getOperand(0));
}
SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SDValue Cond1 = N->getOperand(0);
- MVT CondVT = Cond1.getValueType();
+ EVT CondVT = Cond1.getValueType();
if (CondVT.isVector()) {
- MVT CondEltVT = CondVT.getVectorElementType();
- MVT CondWidenVT = MVT::getVectorVT(CondEltVT, WidenNumElts);
+ EVT CondEltVT = CondVT.getVectorElementType();
+ EVT CondWidenVT = EVT::getVectorVT(*DAG.getContext(), CondEltVT, WidenNumElts);
if (getTypeAction(CondVT) == WidenVector)
Cond1 = GetWidenedVector(Cond1);
@@ -1681,15 +1719,15 @@ SDValue DAGTypeLegalizer::WidenVecRes_SELECT_CC(SDNode *N) {
}
SDValue DAGTypeLegalizer::WidenVecRes_UNDEF(SDNode *N) {
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getUNDEF(WidenVT);
}
SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
- MVT WidenVT = TLI.getTypeToTransformTo(VT);
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
unsigned NumElts = VT.getVectorNumElements();
unsigned WidenNumElts = WidenVT.getVectorNumElements();
@@ -1711,13 +1749,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N) {
}
SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) {
- MVT WidenVT = TLI.getTypeToTransformTo(N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SDValue InOp1 = N->getOperand(0);
- MVT InVT = InOp1.getValueType();
+ EVT InVT = InOp1.getValueType();
  assert(InVT.isVector() && "cannot widen non-vector type");
- MVT WidenInVT = MVT::getVectorVT(InVT.getVectorElementType(), WidenNumElts);
+ EVT WidenInVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(), WidenNumElts);
InOp1 = GetWidenedVector(InOp1);
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
@@ -1735,18 +1773,19 @@ SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) {
// Widen Vector Operand
//===----------------------------------------------------------------------===//
bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned ResNo) {
- DEBUG(cerr << "Widen node operand " << ResNo << ": "; N->dump(&DAG);
- cerr << "\n");
+ DEBUG(errs() << "Widen node operand " << ResNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n");
SDValue Res = SDValue();
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
- cerr << "WidenVectorOperand op #" << ResNo << ": ";
- N->dump(&DAG); cerr << "\n";
+ errs() << "WidenVectorOperand op #" << ResNo << ": ";
+ N->dump(&DAG);
+ errs() << "\n";
#endif
- assert(0 && "Do not know how to widen this operator's operand!");
- abort();
+ llvm_unreachable("Do not know how to widen this operator's operand!");
case ISD::BIT_CONVERT: Res = WidenVecOp_BIT_CONVERT(N); break;
case ISD::CONCAT_VECTORS: Res = WidenVecOp_CONCAT_VECTORS(N); break;
@@ -1757,8 +1796,13 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned ResNo) {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
case ISD::TRUNCATE:
- case ISD::UINT_TO_FP: Res = WidenVecOp_Convert(N); break;
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ Res = WidenVecOp_Convert(N);
+ break;
}
// If Res is null, the sub-method took care of registering the result.
@@ -1781,15 +1825,15 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
// Since the result is legal and the input is illegal, it is unlikely
// that we can fix the input to a legal type so unroll the convert
// into some scalar code and create a nasty build vector.
- MVT VT = N->getValueType(0);
- MVT EltVT = VT.getVectorElementType();
+ EVT VT = N->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
DebugLoc dl = N->getDebugLoc();
unsigned NumElts = VT.getVectorNumElements();
SDValue InOp = N->getOperand(0);
if (getTypeAction(InOp.getValueType()) == WidenVector)
InOp = GetWidenedVector(InOp);
- MVT InVT = InOp.getValueType();
- MVT InEltVT = InVT.getVectorElementType();
+ EVT InVT = InOp.getValueType();
+ EVT InEltVT = InVT.getVectorElementType();
unsigned Opcode = N->getOpcode();
SmallVector<SDValue, 16> Ops(NumElts);
@@ -1802,9 +1846,9 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
}
SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
SDValue InOp = GetWidenedVector(N->getOperand(0));
- MVT InWidenVT = InOp.getValueType();
+ EVT InWidenVT = InOp.getValueType();
DebugLoc dl = N->getDebugLoc();
// Check if we can convert between two legal vector types and extract.
@@ -1812,7 +1856,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
unsigned Size = VT.getSizeInBits();
if (InWidenSize % Size == 0 && !VT.isVector()) {
unsigned NewNumElts = InWidenSize / Size;
- MVT NewVT = MVT::getVectorVT(VT, NewNumElts);
+ EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
if (TLI.isTypeLegal(NewVT)) {
SDValue BitOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, InOp);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
@@ -1820,31 +1864,20 @@ SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
}
}
- // Lower the bit-convert to a store/load from the stack. Create the stack
- // frame object. Make sure it is aligned for both the source and destination
- // types.
- SDValue FIPtr = DAG.CreateStackTemporary(InWidenVT, VT);
- int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
- const Value *SV = PseudoSourceValue::getFixedStack(FI);
-
- // Emit a store to the stack slot.
- SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, InOp, FIPtr, SV, 0);
-
- // Result is a load from the stack slot.
- return DAG.getLoad(VT, dl, Store, FIPtr, SV, 0);
+ return CreateStackStoreLoad(InOp, VT);
}
SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
// If the input vector is not legal, it is likely that we will not find a
// legal vector of the same size. Replace the concatenate vector with a
// nasty build vector.
- MVT VT = N->getValueType(0);
- MVT EltVT = VT.getVectorElementType();
+ EVT VT = N->getValueType(0);
+ EVT EltVT = VT.getVectorElementType();
DebugLoc dl = N->getDebugLoc();
unsigned NumElts = VT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(NumElts);
- MVT InVT = N->getOperand(0).getValueType();
+ EVT InVT = N->getOperand(0).getValueType();
unsigned NumInElts = InVT.getVectorNumElements();
unsigned Idx = 0;
@@ -1862,9 +1895,8 @@ SDValue DAGTypeLegalizer::WidenVecOp_CONCAT_VECTORS(SDNode *N) {
SDValue DAGTypeLegalizer::WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDValue InOp = GetWidenedVector(N->getOperand(0));
- MVT EltVT = InOp.getValueType().getVectorElementType();
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, N->getDebugLoc(),
- EltVT, InOp, N->getOperand(1));
+ N->getValueType(0), InOp, N->getOperand(1));
}
SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
@@ -1880,8 +1912,8 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
SDValue ValOp = GetWidenedVector(ST->getValue());
DebugLoc dl = N->getDebugLoc();
- MVT StVT = ST->getMemoryVT();
- MVT ValVT = ValOp.getValueType();
+ EVT StVT = ST->getMemoryVT();
+ EVT ValVT = ValOp.getValueType();
// It must be true that the widened vector type is bigger than where
// we need to store.
assert(StVT.isVector() && ValOp.getValueType().isVector());
@@ -1892,8 +1924,8 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
// For truncating stores, we cannot play the trick of chopping legal
// vector types and bit-casting to the right type. Instead, we unroll
// the store.
- MVT StEltVT = StVT.getVectorElementType();
- MVT ValEltVT = ValVT.getVectorElementType();
+ EVT StEltVT = StVT.getVectorElementType();
+ EVT ValEltVT = ValVT.getVectorElementType();
unsigned Increment = ValEltVT.getSizeInBits() / 8;
unsigned NumElts = StVT.getVectorNumElements();
SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ValEltVT, ValOp,
@@ -1938,9 +1970,10 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
// VecVT: Vector value type whose size we must match.
// Returns NewVecVT and NewEltVT - the vector type and its associated
// element type.
-static void FindAssocWidenVecType(const TargetLowering &TLI, unsigned Width,
- MVT VecVT,
- MVT& NewEltVT, MVT& NewVecVT) {
+static void FindAssocWidenVecType(SelectionDAG& DAG,
+ const TargetLowering &TLI, unsigned Width,
+ EVT VecVT,
+ EVT& NewEltVT, EVT& NewVecVT) {
unsigned EltWidth = Width + 1;
if (TLI.isTypeLegal(VecVT)) {
// We start with the preferred width, making it a power of 2 and find a
@@ -1950,9 +1983,9 @@ static void FindAssocWidenVecType(const TargetLowering &TLI, unsigned Width,
do {
assert(EltWidth > 0);
EltWidth = 1 << Log2_32(EltWidth - 1);
- NewEltVT = MVT::getIntegerVT(EltWidth);
+ NewEltVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
unsigned NumElts = VecVT.getSizeInBits() / EltWidth;
- NewVecVT = MVT::getVectorVT(NewEltVT, NumElts);
+ NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT, NumElts);
} while (!TLI.isTypeLegal(NewVecVT) ||
VecVT.getSizeInBits() != NewVecVT.getSizeInBits());
} else {
@@ -1965,9 +1998,9 @@ static void FindAssocWidenVecType(const TargetLowering &TLI, unsigned Width,
do {
assert(EltWidth > 0);
EltWidth = 1 << Log2_32(EltWidth - 1);
- NewEltVT = MVT::getIntegerVT(EltWidth);
+ NewEltVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
unsigned NumElts = VecVT.getSizeInBits() / EltWidth;
- NewVecVT = MVT::getVectorVT(NewEltVT, NumElts);
+ NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewEltVT, NumElts);
} while (!TLI.isTypeLegal(NewEltVT) ||
VecVT.getSizeInBits() != NewVecVT.getSizeInBits());
}
@@ -1981,7 +2014,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
unsigned Alignment,
bool isVolatile,
unsigned LdWidth,
- MVT ResType,
+ EVT ResType,
DebugLoc dl) {
// The strategy assumes that we can efficiently load powers of two widths.
// The routine chops the vector into the largest power of 2 load and
@@ -1992,9 +2025,9 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
// the load is nonvolatile, we can use a wider load for the value.
// Find the vector type that we can load from.
- MVT NewEltVT, NewVecVT;
+ EVT NewEltVT, NewVecVT;
unsigned NewEltVTWidth;
- FindAssocWidenVecType(TLI, LdWidth, ResType, NewEltVT, NewVecVT);
+ FindAssocWidenVecType(DAG, TLI, LdWidth, ResType, NewEltVT, NewVecVT);
NewEltVTWidth = NewEltVT.getSizeInBits();
SDValue LdOp = DAG.getLoad(NewEltVT, dl, Chain, BasePtr, SV, SVOffset,
@@ -2021,7 +2054,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
// The type we are currently using is too large; use a smaller size by
// using a smaller power of 2.
unsigned oNewEltVTWidth = NewEltVTWidth;
- FindAssocWidenVecType(TLI, LdWidth, ResType, NewEltVT, NewVecVT);
+ FindAssocWidenVecType(DAG, TLI, LdWidth, ResType, NewEltVT, NewVecVT);
NewEltVTWidth = NewEltVT.getSizeInBits();
// Readjust position and vector position based on new load type
Idx = Idx * (oNewEltVTWidth/NewEltVTWidth);
@@ -2056,10 +2089,10 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
// want to store. This avoids requiring a stack convert.
// Find a width of the element type we can store with
- MVT WidenVT = ValOp.getValueType();
- MVT NewEltVT, NewVecVT;
+ EVT WidenVT = ValOp.getValueType();
+ EVT NewEltVT, NewVecVT;
- FindAssocWidenVecType(TLI, StWidth, WidenVT, NewEltVT, NewVecVT);
+ FindAssocWidenVecType(DAG, TLI, StWidth, WidenVT, NewEltVT, NewVecVT);
unsigned NewEltVTWidth = NewEltVT.getSizeInBits();
SDValue VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVecVT, ValOp);
@@ -2088,7 +2121,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
// The type we are currently using is too large; use a smaller size by
// using a smaller power of 2.
unsigned oNewEltVTWidth = NewEltVTWidth;
- FindAssocWidenVecType(TLI, StWidth, WidenVT, NewEltVT, NewVecVT);
+ FindAssocWidenVecType(DAG, TLI, StWidth, WidenVT, NewEltVT, NewVecVT);
NewEltVTWidth = NewEltVT.getSizeInBits();
// Readjust position and vector position based on new load type
Idx = Idx * (oNewEltVTWidth/NewEltVTWidth);
@@ -2106,10 +2139,10 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
/// Modifies a vector input (widens or narrows) to a vector of NVT. The
/// input vector must have the same element type as NVT.
-SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, MVT NVT) {
+SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, EVT NVT) {
// Note that InOp might have been widened, so it might already have
// the right width or it might need to be narrowed.
- MVT InVT = InOp.getValueType();
+ EVT InVT = InOp.getValueType();
assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
"input and widen element type must match");
DebugLoc dl = InOp.getDebugLoc();
@@ -2137,7 +2170,7 @@ SDValue DAGTypeLegalizer::ModifyToType(SDValue InOp, MVT NVT) {
// Fall back to extract and build.
SmallVector<SDValue, 16> Ops(WidenNumElts);
- MVT EltVT = NVT.getVectorElementType();
+ EVT EltVT = NVT.getVectorElementType();
unsigned MinNumElts = std::min(WidenNumElts, InNumElts);
unsigned Idx;
for (Idx = 0; Idx < MinNumElts; ++Idx)
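The dominant change in the LegalizeVectorTypes.cpp hunks above is mechanical: MVT becomes EVT, and the EVT factory functions take the LLVMContext owned by the DAG, since extended value types are uniqued per context. A minimal sketch of the new idiom, assuming the SelectionDAG and ValueTypes headers from this tree; the helper name getHalfVT is illustrative, not part of the patch:

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/ValueTypes.h"
    using namespace llvm;

    // Build the vector type for one half of a split vector, threading the
    // DAG's LLVMContext through EVT::getVectorVT as the patch does; the old
    // MVT::getVectorVT call needed no context argument.
    static EVT getHalfVT(SelectionDAG &DAG, EVT VT) {
      EVT EltVT = VT.getVectorElementType();
      unsigned NumElts = VT.getVectorNumElements() / 2;
      return EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    }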
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index af73b28..e0f93d8 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -24,6 +24,8 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
using namespace llvm;
STATISTIC(NumUnfolds, "Number of nodes unfolded");
@@ -108,14 +110,14 @@ private:
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
- DOUT << "********** List Scheduling **********\n";
+ DEBUG(errs() << "********** List Scheduling **********\n");
NumLiveRegs = 0;
LiveRegDefs.resize(TRI->getNumRegs(), NULL);
LiveRegCycles.resize(TRI->getNumRegs(), 0);
// Build the scheduling graph.
- BuildSchedGraph();
+ BuildSchedGraph(NULL);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
@@ -132,17 +134,17 @@ void ScheduleDAGFast::Schedule() {
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
- --PredSU->NumSuccsLeft;
-
+
#ifndef NDEBUG
- if (PredSU->NumSuccsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (PredSU->NumSuccsLeft == 0) {
+ errs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
- cerr << " has been released too many times!\n";
- assert(0);
+ errs() << " has been released too many times!\n";
+ llvm_unreachable(0);
}
#endif
-
+ --PredSU->NumSuccsLeft;
+
// If all the node's successors are scheduled, this node is ready
// to be scheduled. Ignore the special EntrySU node.
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
@@ -174,7 +176,7 @@ void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
- DOUT << "*** Scheduling [" << CurCycle << "]: ";
+ DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
@@ -214,7 +216,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
SUnit *NewSU;
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
- MVT VT = N->getValueType(i);
+ EVT VT = N->getValueType(i);
if (VT == MVT::Flag)
return NULL;
else if (VT == MVT::Other)
@@ -222,7 +224,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
const SDValue &Op = N->getOperand(i);
- MVT VT = Op.getNode()->getValueType(Op.getResNo());
+ EVT VT = Op.getNode()->getValueType(Op.getResNo());
if (VT == MVT::Flag)
return NULL;
}
@@ -232,7 +234,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
return NULL;
- DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
+ DEBUG(errs() << "Unfolding SU # " << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
N = NewNodes[1];
@@ -342,7 +344,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
SU = NewSU;
}
- DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
+ DEBUG(errs() << "Duplicating SU # " << SU->NodeNum << "\n");
NewSU = Clone(SU);
// New SUnit has the exact same predecessors.
@@ -419,7 +421,7 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
-static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
+static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const TargetInstrInfo *TII) {
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
@@ -533,7 +535,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
- MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
+ EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
const TargetRegisterClass *RC =
TRI->getPhysicalRegisterRegClass(Reg, VT);
const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
@@ -549,16 +551,16 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
// Issue copies, these can be expensive cross register class copies.
SmallVector<SUnit*, 2> Copies;
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
- DOUT << "Adding an edge from SU # " << TrySU->NodeNum
- << " to SU #" << Copies.front()->NodeNum << "\n";
+ DEBUG(errs() << "Adding an edge from SU # " << TrySU->NodeNum
+ << " to SU #" << Copies.front()->NodeNum << "\n");
AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
/*Reg=*/0, /*isNormalMemory=*/false,
/*isMustAlias=*/false, /*isArtificial=*/true));
NewDef = Copies.back();
}
- DOUT << "Adding an edge from SU # " << NewDef->NodeNum
- << " to SU #" << TrySU->NodeNum << "\n";
+ DEBUG(errs() << "Adding an edge from SU # " << NewDef->NodeNum
+ << " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
/*Reg=*/0, /*isNormalMemory=*/false,
@@ -568,8 +570,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
}
if (!CurSU) {
- assert(false && "Unable to resolve live physical register dependencies!");
- abort();
+ llvm_unreachable("Unable to resolve live physical register dependencies!");
}
}
@@ -587,41 +588,11 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
++CurCycle;
}
- // Reverse the order if it is bottom up.
+ // Reverse the order since it is bottom up.
std::reverse(Sequence.begin(), Sequence.end());
-
-
+
#ifndef NDEBUG
- // Verify that all SUnits were scheduled.
- bool AnyNotSched = false;
- unsigned DeadNodes = 0;
- unsigned Noops = 0;
- for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
- if (!SUnits[i].isScheduled) {
- if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
- ++DeadNodes;
- continue;
- }
- if (!AnyNotSched)
- cerr << "*** List scheduling failed! ***\n";
- SUnits[i].dump(this);
- cerr << "has not been scheduled!\n";
- AnyNotSched = true;
- }
- if (SUnits[i].NumSuccsLeft != 0) {
- if (!AnyNotSched)
- cerr << "*** List scheduling failed! ***\n";
- SUnits[i].dump(this);
- cerr << "has successors left!\n";
- AnyNotSched = true;
- }
- }
- for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
- if (!Sequence[i])
- ++Noops;
- assert(!AnyNotSched);
- assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
- "The number of nodes scheduled doesn't match the expected number!");
+ VerifySchedule(/*isBottomUp=*/true);
#endif
}
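For reference, a minimal sketch of the debug-output idiom these scheduler files move to: the old DOUT stream is replaced by the DEBUG() macro writing to the raw_ostream returned by errs(). The helper name below is hypothetical and the snippet is not part of the patch.

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Hypothetical helper: the message is compiled in only for asserts-enabled
// builds, and printed only when debugging is enabled for this debug type.
static void noteScheduled(unsigned NodeNum, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: SU # " << NodeNum << "\n");
}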
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
index c432534..c8d2158 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
@@ -29,6 +29,8 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/Statistic.h"
#include <climits>
@@ -86,10 +88,10 @@ private:
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGList::Schedule() {
- DOUT << "********** List Scheduling **********\n";
+ DEBUG(errs() << "********** List Scheduling **********\n");
// Build the scheduling graph.
- BuildSchedGraph();
+ BuildSchedGraph(NULL);
AvailableQueue->initNodes(SUnits);
@@ -106,17 +108,17 @@ void ScheduleDAGList::Schedule() {
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGList::ReleaseSucc(SUnit *SU, const SDep &D) {
SUnit *SuccSU = D.getSUnit();
- --SuccSU->NumPredsLeft;
-
+
#ifndef NDEBUG
- if (SuccSU->NumPredsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (SuccSU->NumPredsLeft == 0) {
+ errs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
- cerr << " has been released too many times!\n";
- assert(0);
+ errs() << " has been released too many times!\n";
+ llvm_unreachable(0);
}
#endif
-
+ --SuccSU->NumPredsLeft;
+
SuccSU->setDepthToAtLeast(SU->getDepth() + D.getLatency());
// If all the node's predecessors are scheduled, this node is ready
@@ -140,7 +142,7 @@ void ScheduleDAGList::ReleaseSuccessors(SUnit *SU) {
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
- DOUT << "*** Scheduling [" << CurCycle << "]: ";
+ DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
Sequence.push_back(SU);
@@ -232,7 +234,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
} else if (!HasNoopHazards) {
// Otherwise, we have a pipeline stall, but no other problem, just advance
// the current cycle and try again.
- DOUT << "*** Advancing cycle, no work to do\n";
+ DEBUG(errs() << "*** Advancing cycle, no work to do\n");
HazardRec->AdvanceCycle();
++NumStalls;
++CurCycle;
@@ -240,7 +242,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
// Otherwise, we have no instructions to issue and we have instructions
// that will fault if we don't do this right. This is the case for
// processors without pipeline interlocks and other cases.
- DOUT << "*** Emitting noop\n";
+ DEBUG(errs() << "*** Emitting noop\n");
HazardRec->EmitNoop();
Sequence.push_back(0); // NULL here means noop
++NumNoops;
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index c97e2a8..cec24e6 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -25,10 +25,12 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;
@@ -163,14 +165,14 @@ private:
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
- DOUT << "********** List Scheduling **********\n";
+ DEBUG(errs() << "********** List Scheduling **********\n");
NumLiveRegs = 0;
LiveRegDefs.resize(TRI->getNumRegs(), NULL);
LiveRegCycles.resize(TRI->getNumRegs(), 0);
// Build the scheduling graph.
- BuildSchedGraph();
+ BuildSchedGraph(NULL);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
@@ -195,17 +197,17 @@ void ScheduleDAGRRList::Schedule() {
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
SUnit *PredSU = PredEdge->getSUnit();
- --PredSU->NumSuccsLeft;
-
+
#ifndef NDEBUG
- if (PredSU->NumSuccsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (PredSU->NumSuccsLeft == 0) {
+ errs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
- cerr << " has been released too many times!\n";
- assert(0);
+ errs() << " has been released too many times!\n";
+ llvm_unreachable(0);
}
#endif
-
+ --PredSU->NumSuccsLeft;
+
// If all the node's successors are scheduled, this node is ready
// to be scheduled. Ignore the special EntrySU node.
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
@@ -237,7 +239,7 @@ void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
- DOUT << "*** Scheduling [" << CurCycle << "]: ";
+ DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
@@ -276,13 +278,14 @@ void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
AvailableQueue->remove(PredSU);
}
+ assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
++PredSU->NumSuccsLeft;
}
/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessor states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
- DOUT << "*** Unscheduling [" << SU->getHeight() << "]: ";
+ DEBUG(errs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
DEBUG(SU->dump(this));
AvailableQueue->UnscheduledNode(SU);
@@ -351,7 +354,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
SUnit *NewSU;
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
- MVT VT = N->getValueType(i);
+ EVT VT = N->getValueType(i);
if (VT == MVT::Flag)
return NULL;
else if (VT == MVT::Other)
@@ -359,7 +362,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
const SDValue &Op = N->getOperand(i);
- MVT VT = Op.getNode()->getValueType(Op.getResNo());
+ EVT VT = Op.getNode()->getValueType(Op.getResNo());
if (VT == MVT::Flag)
return NULL;
}
@@ -369,7 +372,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
return NULL;
- DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
+ DEBUG(errs() << "Unfolding SU # " << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
N = NewNodes[1];
@@ -488,7 +491,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
SU = NewSU;
}
- DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
+ DEBUG(errs() << "Duplicating SU # " << SU->NodeNum << "\n");
NewSU = CreateClone(SU);
// New SUnit has the exact same predecessors.
@@ -570,7 +573,7 @@ void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
-static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
+static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const TargetInstrInfo *TII) {
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
@@ -753,7 +756,7 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
- MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
+ EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
const TargetRegisterClass *RC =
TRI->getPhysicalRegisterRegClass(Reg, VT);
const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
@@ -769,8 +772,8 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
// Issue copies, these can be expensive cross register class copies.
SmallVector<SUnit*, 2> Copies;
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
- DOUT << "Adding an edge from SU #" << TrySU->NodeNum
- << " to SU #" << Copies.front()->NodeNum << "\n";
+ DEBUG(errs() << "Adding an edge from SU #" << TrySU->NodeNum
+ << " to SU #" << Copies.front()->NodeNum << "\n");
AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
/*Reg=*/0, /*isNormalMemory=*/false,
/*isMustAlias=*/false,
@@ -778,8 +781,8 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
NewDef = Copies.back();
}
- DOUT << "Adding an edge from SU #" << NewDef->NodeNum
- << " to SU #" << TrySU->NodeNum << "\n";
+ DEBUG(errs() << "Adding an edge from SU #" << NewDef->NodeNum
+ << " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
/*Reg=*/0, /*isNormalMemory=*/false,
@@ -822,17 +825,17 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
SUnit *SuccSU = SuccEdge->getSUnit();
- --SuccSU->NumPredsLeft;
-
+
#ifndef NDEBUG
- if (SuccSU->NumPredsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (SuccSU->NumPredsLeft == 0) {
+ errs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
- cerr << " has been released too many times!\n";
- assert(0);
+ errs() << " has been released too many times!\n";
+ llvm_unreachable(0);
}
#endif
-
+ --SuccSU->NumPredsLeft;
+
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
@@ -856,7 +859,7 @@ void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
- DOUT << "*** Scheduling [" << CurCycle << "]: ";
+ DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
@@ -1215,7 +1218,7 @@ static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
if (!SUImpDefs)
return false;
for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
- MVT VT = N->getValueType(i);
+ EVT VT = N->getValueType(i);
if (VT == MVT::Flag || VT == MVT::Other)
continue;
if (!N->hasAnyUseOfValue(i))
@@ -1328,9 +1331,9 @@ void RegReductionPriorityQueue<SF>::PrescheduleNodesWithMultipleUses() {
// Ok, the transformation is safe and the heuristics suggest it is
// profitable. Update the graph.
- DOUT << "Prescheduling SU # " << SU->NodeNum
- << " next to PredSU # " << PredSU->NodeNum
- << " to guide scheduling in the presence of multiple uses\n";
+ DEBUG(errs() << "Prescheduling SU # " << SU->NodeNum
+ << " next to PredSU # " << PredSU->NodeNum
+ << " to guide scheduling in the presence of multiple uses\n");
for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
SDep Edge = PredSU->Succs[i];
assert(!Edge.isAssignedRegDep());
@@ -1418,8 +1421,8 @@ void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
(hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
(!SU->isCommutable && SuccSU->isCommutable)) &&
!scheduleDAG->IsReachable(SuccSU, SU)) {
- DOUT << "Adding a pseudo-two-addr edge from SU # " << SU->NodeNum
- << " to SU #" << SuccSU->NodeNum << "\n";
+ DEBUG(errs() << "Adding a pseudo-two-addr edge from SU # "
+ << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
/*Reg=*/0, /*isNormalMemory=*/false,
/*isMustAlias=*/false,
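The ReleasePred/ReleaseSucc hunks above move the "released too many times" check ahead of the decrement: NumPredsLeft and NumSuccsLeft are unsigned, so the old post-decrement "< 0" test could never fire. A minimal sketch of the guarded pattern, with Count standing in for those counters (illustrative only, not part of the patch):

#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

// Count stands in for SUnit::NumPredsLeft / NumSuccsLeft (both unsigned):
// test for zero before decrementing, since an unsigned counter cannot be
// observed as negative after the decrement.
static void releaseOnce(unsigned &Count) {
#ifndef NDEBUG
  if (Count == 0) {
    llvm::errs() << "*** Scheduling failed! ***\n"
                 << "node has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --Count;  // safe: Count was non-zero above
}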
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 7aa15bc..d53de34 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -14,10 +14,12 @@
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
+#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -152,6 +154,11 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
}
void ScheduleDAGSDNodes::AddSchedEdges() {
+ const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
+
+ // Check to see if the scheduler cares about latencies.
+ bool UnitLatencies = ForceUnitLatencies();
+
// Pass 2: add the preds, succs, etc.
for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
SUnit *SU = &SUnits[su];
@@ -175,7 +182,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if (N->isMachineOpcode() &&
TII->get(N->getMachineOpcode()).getImplicitDefs()) {
SU->hasPhysRegClobbers = true;
- unsigned NumUsed = CountResults(N);
+ unsigned NumUsed = InstrEmitter::CountResults(N);
while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
--NumUsed; // Skip over unused values at the end.
if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
@@ -189,7 +196,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
assert(OpSU && "Node has no SUnit!");
if (OpSU == SU) continue; // In the same group.
- MVT OpVT = N->getOperand(i).getValueType();
+ EVT OpVT = N->getOperand(i).getValueType();
assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
bool isChain = OpVT == MVT::Other;
@@ -206,8 +213,15 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
// dependency. This may change in the future though.
if (Cost >= 0)
PhysReg = 0;
- SU->addPred(SDep(OpSU, isChain ? SDep::Order : SDep::Data,
- OpSU->Latency, PhysReg));
+
+ const SDep& dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
+ OpSU->Latency, PhysReg);
+ if (!isChain && !UnitLatencies) {
+ ComputeOperandLatency(OpSU, SU, (SDep &)dep);
+ ST.adjustSchedDependency(OpSU, SU, (SDep &)dep);
+ }
+
+ SU->addPred(dep);
}
}
}
@@ -217,7 +231,7 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
/// are input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged together nodes with a single SUnit.
-void ScheduleDAGSDNodes::BuildSchedGraph() {
+void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
// Populate the SUnits array.
BuildSchedUnits();
// Compute all the scheduling dependencies between nodes.
@@ -230,65 +244,68 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
// Compute the latency for the node. We use the sum of the latencies for
// all nodes flagged together into this SUnit.
SU->Latency = 0;
- bool SawMachineOpcode = false;
for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
if (N->isMachineOpcode()) {
- SawMachineOpcode = true;
- SU->Latency +=
- InstrItins.getLatency(TII->get(N->getMachineOpcode()).getSchedClass());
+ SU->Latency += InstrItins.
+ getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
}
}
-/// CountResults - The results of target nodes have register or immediate
-/// operands first, then an optional chain, and optional flag operands (which do
-/// not go into the resulting MachineInstr).
-unsigned ScheduleDAGSDNodes::CountResults(SDNode *Node) {
- unsigned N = Node->getNumValues();
- while (N && Node->getValueType(N - 1) == MVT::Flag)
- --N;
- if (N && Node->getValueType(N - 1) == MVT::Other)
- --N; // Skip over chain result.
- return N;
-}
-
-/// CountOperands - The inputs to target nodes have any actual inputs first,
-/// followed by special operands that describe memory references, then an
-/// optional chain operand, then an optional flag operand. Compute the number
-/// of actual operands that will go into the resulting MachineInstr.
-unsigned ScheduleDAGSDNodes::CountOperands(SDNode *Node) {
- unsigned N = ComputeMemOperandsEnd(Node);
- while (N && isa<MemOperandSDNode>(Node->getOperand(N - 1).getNode()))
- --N; // Ignore MEMOPERAND nodes
- return N;
-}
-
-/// ComputeMemOperandsEnd - Find the index one past the last MemOperandSDNode
-/// operand
-unsigned ScheduleDAGSDNodes::ComputeMemOperandsEnd(SDNode *Node) {
- unsigned N = Node->getNumOperands();
- while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
- --N;
- if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
- --N; // Ignore chain if it exists.
- return N;
-}
-
-
void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
if (!SU->getNode()) {
- cerr << "PHYS REG COPY\n";
+ errs() << "PHYS REG COPY\n";
return;
}
SU->getNode()->dump(DAG);
- cerr << "\n";
+ errs() << "\n";
SmallVector<SDNode *, 4> FlaggedNodes;
for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
FlaggedNodes.push_back(N);
while (!FlaggedNodes.empty()) {
- cerr << " ";
+ errs() << " ";
FlaggedNodes.back()->dump(DAG);
- cerr << "\n";
+ errs() << "\n";
FlaggedNodes.pop_back();
}
}
+
+/// EmitSchedule - Emit the machine code in scheduled order.
+MachineBasicBlock *ScheduleDAGSDNodes::
+EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
+ InstrEmitter Emitter(BB, InsertPos);
+ DenseMap<SDValue, unsigned> VRBaseMap;
+ DenseMap<SUnit*, unsigned> CopyVRBaseMap;
+ for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
+ SUnit *SU = Sequence[i];
+ if (!SU) {
+ // Null SUnit* is a noop.
+ EmitNoop();
+ continue;
+ }
+
+ // For pre-regalloc scheduling, create instructions corresponding to the
+ // SDNode and any flagged SDNodes and append them to the block.
+ if (!SU->getNode()) {
+ // Emit a copy.
+ EmitPhysRegCopy(SU, CopyVRBaseMap);
+ continue;
+ }
+
+ SmallVector<SDNode *, 4> FlaggedNodes;
+ for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
+ N = N->getFlaggedNode())
+ FlaggedNodes.push_back(N);
+ while (!FlaggedNodes.empty()) {
+ Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
+ VRBaseMap, EM);
+ FlaggedNodes.pop_back();
+ }
+ Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
+ VRBaseMap, EM);
+ }
+
+ BB = Emitter.getBlock();
+ InsertPos = Emitter.getInsertPos();
+ return BB;
+}
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 2a278b7..c9c36f7 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -58,7 +58,6 @@ namespace llvm {
if (isa<ConstantPoolSDNode>(Node)) return true;
if (isa<JumpTableSDNode>(Node)) return true;
if (isa<ExternalSymbolSDNode>(Node)) return true;
- if (isa<MemOperandSDNode>(Node)) return true;
if (Node->getOpcode() == ISD::EntryToken) return true;
return false;
}
@@ -87,35 +86,14 @@ namespace llvm {
/// are input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged together nodes with a single SUnit.
- virtual void BuildSchedGraph();
+ virtual void BuildSchedGraph(AliasAnalysis *AA);
/// ComputeLatency - Compute node latency.
///
virtual void ComputeLatency(SUnit *SU);
- /// CountResults - The results of target nodes have register or immediate
- /// operands first, then an optional chain, and optional flag operands
- /// (which do not go into the machine instrs.)
- static unsigned CountResults(SDNode *Node);
-
- /// CountOperands - The inputs to target nodes have any actual inputs first,
- /// followed by special operands that describe memory references, then an
- /// optional chain operand, then flag operands. Compute the number of
- /// actual operands that will go into the resulting MachineInstr.
- static unsigned CountOperands(SDNode *Node);
-
- /// ComputeMemOperandsEnd - Find the index one past the last
- /// MemOperandSDNode operand
- static unsigned ComputeMemOperandsEnd(SDNode *Node);
-
- /// EmitNode - Generate machine code for an node and needed dependencies.
- /// VRBaseMap contains, for each already emitted node, the first virtual
- /// register number for the results of the node.
- ///
- void EmitNode(SDNode *Node, bool IsClone, bool HasClone,
- DenseMap<SDValue, unsigned> &VRBaseMap);
-
- virtual MachineBasicBlock *EmitSchedule();
+ virtual MachineBasicBlock *
+ EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM);
/// Schedule - Order nodes according to selected style, filling
/// in the Sequence member.
@@ -129,47 +107,6 @@ namespace llvm {
virtual void getCustomGraphFeatures(GraphWriter<ScheduleDAG*> &GW) const;
private:
- /// EmitSubregNode - Generate machine code for subreg nodes.
- ///
- void EmitSubregNode(SDNode *Node,
- DenseMap<SDValue, unsigned> &VRBaseMap);
-
- /// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS
- /// nodes.
- ///
- void EmitCopyToRegClassNode(SDNode *Node,
- DenseMap<SDValue, unsigned> &VRBaseMap);
-
- /// getVR - Return the virtual register corresponding to the specified result
- /// of the specified node.
- unsigned getVR(SDValue Op, DenseMap<SDValue, unsigned> &VRBaseMap);
-
- /// getDstOfCopyToRegUse - If the only use of the specified result number of
- /// node is a CopyToReg, return its destination register. Return 0 otherwise.
- unsigned getDstOfOnlyCopyToRegUse(SDNode *Node, unsigned ResNo) const;
-
- void AddOperand(MachineInstr *MI, SDValue Op, unsigned IIOpNum,
- const TargetInstrDesc *II,
- DenseMap<SDValue, unsigned> &VRBaseMap);
-
- /// AddRegisterOperand - Add the specified register as an operand to the
- /// specified machine instr. Insert register copies if the register is
- /// not in the required register class.
- void AddRegisterOperand(MachineInstr *MI, SDValue Op,
- unsigned IIOpNum, const TargetInstrDesc *II,
- DenseMap<SDValue, unsigned> &VRBaseMap);
-
- /// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
- /// implicit physical register output.
- void EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone,
- bool IsCloned, unsigned SrcReg,
- DenseMap<SDValue, unsigned> &VRBaseMap);
-
- void CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
- const TargetInstrDesc &II, bool IsClone,
- bool IsCloned,
- DenseMap<SDValue, unsigned> &VRBaseMap);
-
/// BuildSchedUnits, AddSchedEdges - Helper functions for BuildSchedGraph.
void BuildSchedUnits();
void AddSchedEdges();
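The SelectionDAG.cpp hunks below carry out the MVT-to-EVT rename: value types are passed around as EVT, and the underlying simple type is reached through getSimpleVT().SimpleTy. A minimal sketch of that access pattern (the function name is hypothetical, not from the patch):

#include "llvm/CodeGen/ValueTypes.h"

// Hypothetical predicate showing the new EVT access pattern: check isSimple()
// first, then dispatch on getSimpleVT().SimpleTy.
static bool isSingleOrDoubleFP(llvm::EVT VT) {
  if (!VT.isSimple())
    return false;
  switch (VT.getSimpleVT().SimpleTy) {
  case llvm::MVT::f32:
  case llvm::MVT::f64: return true;
  default:             return false;
  }
}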
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index c8f4b52..542bf64 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -13,6 +13,7 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Constants.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
@@ -31,6 +32,7 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -46,14 +48,14 @@ using namespace llvm;
/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
-static SDVTList makeVTList(const MVT *VTs, unsigned NumVTs) {
+static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
SDVTList Res = {VTs, NumVTs};
return Res;
}
-static const fltSemantics *MVTToAPFloatSemantics(MVT VT) {
- switch (VT.getSimpleVT()) {
- default: assert(0 && "Unknown FP format");
+static const fltSemantics *EVTToAPFloatSemantics(EVT VT) {
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unknown FP format");
case MVT::f32: return &APFloat::IEEEsingle;
case MVT::f64: return &APFloat::IEEEdouble;
case MVT::f80: return &APFloat::x87DoubleExtended;
@@ -76,7 +78,7 @@ bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
return getValueAPF().bitwiseIsEqual(V);
}
-bool ConstantFPSDNode::isValueValidForType(MVT VT,
+bool ConstantFPSDNode::isValueValidForType(EVT VT,
const APFloat& Val) {
assert(VT.isFloatingPoint() && "Can only convert between FP types");
@@ -88,7 +90,7 @@ bool ConstantFPSDNode::isValueValidForType(MVT VT,
// convert modifies in place, so make a copy.
APFloat Val2 = APFloat(Val);
bool losesInfo;
- (void) Val2.convert(*MVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
+ (void) Val2.convert(*EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
&losesInfo);
return !losesInfo;
}
@@ -243,7 +245,7 @@ ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
switch (Opcode) {
- default: assert(0 && "Illegal integer setcc operation!");
+ default: llvm_unreachable("Illegal integer setcc operation!");
case ISD::SETEQ:
case ISD::SETNE: return 0;
case ISD::SETLT:
@@ -363,11 +365,8 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
switch (N->getOpcode()) {
case ISD::TargetExternalSymbol:
case ISD::ExternalSymbol:
- assert(0 && "Should only be used on nodes with operands");
+ llvm_unreachable("Should only be used on nodes with operands");
default: break; // Normal nodes don't need extra info.
- case ISD::ARG_FLAGS:
- ID.AddInteger(cast<ARG_FLAGSSDNode>(N)->getArgFlags().getRawBits());
- break;
case ISD::TargetConstant:
case ISD::Constant:
ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
@@ -403,11 +402,6 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
case ISD::SRCVALUE:
ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
break;
- case ISD::MEMOPERAND: {
- const MachineMemOperand &MO = cast<MemOperandSDNode>(N)->MO;
- MO.Profile(ID);
- break;
- }
case ISD::FrameIndex:
case ISD::TargetFrameIndex:
ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
@@ -429,12 +423,6 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddInteger(CP->getTargetFlags());
break;
}
- case ISD::CALL: {
- const CallSDNode *Call = cast<CallSDNode>(N);
- ID.AddInteger(Call->getCallingConv());
- ID.AddInteger(Call->isVarArg());
- break;
- }
case ISD::LOAD: {
const LoadSDNode *LD = cast<LoadSDNode>(N);
ID.AddInteger(LD->getMemoryVT().getRawBits());
@@ -466,7 +454,7 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
}
case ISD::VECTOR_SHUFFLE: {
const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
- for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
+ for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
i != e; ++i)
ID.AddInteger(SVN->getMaskElt(i));
break;
@@ -488,20 +476,18 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
}
/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
-/// the CSE map that carries alignment, volatility, indexing mode, and
+/// the CSE map that carries volatility, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
-encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM,
- bool isVolatile, unsigned Alignment) {
+encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile) {
assert((ConvType & 3) == ConvType &&
"ConvType may not require more than 2 bits!");
assert((AM & 7) == AM &&
"AM may not require more than 3 bits!");
return ConvType |
(AM << 2) |
- (isVolatile << 5) |
- ((Log2_32(Alignment) + 1) << 6);
+ (isVolatile << 5);
}
//===----------------------------------------------------------------------===//
@@ -519,7 +505,6 @@ static bool doNotCSE(SDNode *N) {
case ISD::DBG_LABEL:
case ISD::DBG_STOPPOINT:
case ISD::EH_LABEL:
- case ISD::DECLARE:
return true; // Never CSE these nodes.
}
@@ -626,7 +611,7 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
bool Erased = false;
switch (N->getOpcode()) {
case ISD::EntryToken:
- assert(0 && "EntryToken should not be in CSEMaps!");
+ llvm_unreachable("EntryToken should not be in CSEMaps!");
return false;
case ISD::HANDLENODE: return false; // noop.
case ISD::CONDCODE:
@@ -646,12 +631,12 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
break;
}
case ISD::VALUETYPE: {
- MVT VT = cast<VTSDNode>(N)->getVT();
+ EVT VT = cast<VTSDNode>(N)->getVT();
if (VT.isExtended()) {
Erased = ExtendedValueTypeNodes.erase(VT);
} else {
- Erased = ValueTypeNodes[VT.getSimpleVT()] != 0;
- ValueTypeNodes[VT.getSimpleVT()] = 0;
+ Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
+ ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
}
break;
}
@@ -667,8 +652,8 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Flag &&
!N->isMachineOpcode() && !doNotCSE(N)) {
N->dump(this);
- cerr << "\n";
- assert(0 && "Node is not in map!");
+ errs() << "\n";
+ llvm_unreachable("Node is not in map!");
}
#endif
return Erased;
@@ -762,7 +747,7 @@ void SelectionDAG::VerifyNode(SDNode *N) {
default:
break;
case ISD::BUILD_PAIR: {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
assert(N->getNumValues() == 1 && "Too many results!");
assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
"Wrong return type!");
@@ -780,7 +765,7 @@ void SelectionDAG::VerifyNode(SDNode *N) {
assert(N->getValueType(0).isVector() && "Wrong return type!");
assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
"Wrong number of operands!");
- MVT EltVT = N->getValueType(0).getVectorElementType();
+ EVT EltVT = N->getValueType(0).getVectorElementType();
for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I)
assert((I->getValueType() == EltVT ||
(EltVT.isInteger() && I->getValueType().isInteger() &&
@@ -791,13 +776,13 @@ void SelectionDAG::VerifyNode(SDNode *N) {
}
}
-/// getMVTAlignment - Compute the default alignment value for the
+/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
-unsigned SelectionDAG::getMVTAlignment(MVT VT) const {
+unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
const Type *Ty = VT == MVT::iPTR ?
- PointerType::get(Type::Int8Ty, 0) :
- VT.getTypeForMVT();
+ PointerType::get(Type::getInt8Ty(*getContext()), 0) :
+ VT.getTypeForEVT(*getContext());
return TLI.getTargetData()->getABITypeAlignment(Ty);
}
@@ -815,6 +800,7 @@ void SelectionDAG::init(MachineFunction &mf, MachineModuleInfo *mmi,
MF = &mf;
MMI = mmi;
DW = dw;
+ Context = &mf.getFunction()->getContext();
}
SelectionDAG::~SelectionDAG() {
@@ -846,7 +832,19 @@ void SelectionDAG::clear() {
Root = getEntryNode();
}
-SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, DebugLoc DL, MVT VT) {
+SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
+ return VT.bitsGT(Op.getValueType()) ?
+ getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
+ getNode(ISD::TRUNCATE, DL, VT, Op);
+}
+
+SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
+ return VT.bitsGT(Op.getValueType()) ?
+ getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
+ getNode(ISD::TRUNCATE, DL, VT, Op);
+}
+
+SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, DebugLoc DL, EVT VT) {
if (Op.getValueType() == VT) return Op;
APInt Imm = APInt::getLowBitsSet(Op.getValueSizeInBits(),
VT.getSizeInBits());
@@ -856,29 +854,29 @@ SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, DebugLoc DL, MVT VT) {
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
-SDValue SelectionDAG::getNOT(DebugLoc DL, SDValue Val, MVT VT) {
- MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
+SDValue SelectionDAG::getNOT(DebugLoc DL, SDValue Val, EVT VT) {
+ EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
SDValue NegOne =
getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
return getNode(ISD::XOR, DL, VT, Val, NegOne);
}
-SDValue SelectionDAG::getConstant(uint64_t Val, MVT VT, bool isT) {
- MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
+SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
+ EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
assert((EltVT.getSizeInBits() >= 64 ||
(uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
"getConstant with a uint64_t value that doesn't fit in the type!");
return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}
-SDValue SelectionDAG::getConstant(const APInt &Val, MVT VT, bool isT) {
- return getConstant(*ConstantInt::get(Val), VT, isT);
+SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
+ return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
}
-SDValue SelectionDAG::getConstant(const ConstantInt &Val, MVT VT, bool isT) {
+SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
assert(VT.isInteger() && "Cannot create FP integer constant!");
- MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
+ EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
assert(Val.getBitWidth() == EltVT.getSizeInBits() &&
"APInt size does not match type size!");
@@ -913,14 +911,14 @@ SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
}
-SDValue SelectionDAG::getConstantFP(const APFloat& V, MVT VT, bool isTarget) {
- return getConstantFP(*ConstantFP::get(V), VT, isTarget);
+SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
+ return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}
-SDValue SelectionDAG::getConstantFP(const ConstantFP& V, MVT VT, bool isTarget){
+SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
- MVT EltVT =
+ EVT EltVT =
VT.isVector() ? VT.getVectorElementType() : VT;
// Do the map lookup using the actual bit pattern for the floating point
@@ -953,8 +951,8 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, MVT VT, bool isTarget){
return Result;
}
-SDValue SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) {
- MVT EltVT =
+SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
+ EVT EltVT =
VT.isVector() ? VT.getVectorElementType() : VT;
if (EltVT==MVT::f32)
return getConstantFP(APFloat((float)Val), VT, isTarget);
@@ -963,14 +961,15 @@ SDValue SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) {
}
SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
- MVT VT, int64_t Offset,
+ EVT VT, int64_t Offset,
bool isTargetGA,
unsigned char TargetFlags) {
assert((TargetFlags == 0 || isTargetGA) &&
"Cannot set target flags on target-independent globals");
-
+
// Truncate (with sign-extension) the offset value to the pointer size.
- unsigned BitWidth = TLI.getPointerTy().getSizeInBits();
+ EVT PTy = TLI.getPointerTy();
+ unsigned BitWidth = PTy.getSizeInBits();
if (BitWidth < 64)
Offset = (Offset << (64 - BitWidth) >> (64 - BitWidth));
@@ -1002,7 +1001,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
return SDValue(N, 0);
}
-SDValue SelectionDAG::getFrameIndex(int FI, MVT VT, bool isTarget) {
+SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@@ -1017,7 +1016,7 @@ SDValue SelectionDAG::getFrameIndex(int FI, MVT VT, bool isTarget) {
return SDValue(N, 0);
}
-SDValue SelectionDAG::getJumpTable(int JTI, MVT VT, bool isTarget,
+SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
unsigned char TargetFlags) {
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent jump tables");
@@ -1036,9 +1035,9 @@ SDValue SelectionDAG::getJumpTable(int JTI, MVT VT, bool isTarget,
return SDValue(N, 0);
}
-SDValue SelectionDAG::getConstantPool(Constant *C, MVT VT,
+SDValue SelectionDAG::getConstantPool(Constant *C, EVT VT,
unsigned Alignment, int Offset,
- bool isTarget,
+ bool isTarget,
unsigned char TargetFlags) {
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
@@ -1062,7 +1061,7 @@ SDValue SelectionDAG::getConstantPool(Constant *C, MVT VT,
}
-SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, MVT VT,
+SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
unsigned Alignment, int Offset,
bool isTarget,
unsigned char TargetFlags) {
@@ -1101,26 +1100,13 @@ SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
return SDValue(N, 0);
}
-SDValue SelectionDAG::getArgFlags(ISD::ArgFlagsTy Flags) {
- FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::ARG_FLAGS, getVTList(MVT::Other), 0, 0);
- ID.AddInteger(Flags.getRawBits());
- void *IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
- return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<ARG_FLAGSSDNode>();
- new (N) ARG_FLAGSSDNode(Flags);
- CSEMap.InsertNode(N, IP);
- AllNodes.push_back(N);
- return SDValue(N, 0);
-}
-
-SDValue SelectionDAG::getValueType(MVT VT) {
- if (VT.isSimple() && (unsigned)VT.getSimpleVT() >= ValueTypeNodes.size())
- ValueTypeNodes.resize(VT.getSimpleVT()+1);
+SDValue SelectionDAG::getValueType(EVT VT) {
+ if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
+ ValueTypeNodes.size())
+ ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
SDNode *&N = VT.isExtended() ?
- ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT()];
+ ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
if (N) return SDValue(N, 0);
N = NodeAllocator.Allocate<VTSDNode>();
@@ -1129,7 +1115,7 @@ SDValue SelectionDAG::getValueType(MVT VT) {
return SDValue(N, 0);
}
-SDValue SelectionDAG::getExternalSymbol(const char *Sym, MVT VT) {
+SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
SDNode *&N = ExternalSymbols[Sym];
if (N) return SDValue(N, 0);
N = NodeAllocator.Allocate<ExternalSymbolSDNode>();
@@ -1138,7 +1124,7 @@ SDValue SelectionDAG::getExternalSymbol(const char *Sym, MVT VT) {
return SDValue(N, 0);
}
-SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, MVT VT,
+SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
unsigned char TargetFlags) {
SDNode *&N =
TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
@@ -1177,19 +1163,19 @@ static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
}
}
-SDValue SelectionDAG::getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1,
+SDValue SelectionDAG::getVectorShuffle(EVT VT, DebugLoc dl, SDValue N1,
SDValue N2, const int *Mask) {
assert(N1.getValueType() == N2.getValueType() && "Invalid VECTOR_SHUFFLE");
- assert(VT.isVector() && N1.getValueType().isVector() &&
+ assert(VT.isVector() && N1.getValueType().isVector() &&
"Vector Shuffle VTs must be a vectors");
assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType()
&& "Vector Shuffle VTs must have same element type");
// Canonicalize shuffle undef, undef -> undef
if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
- return N1;
+ return getUNDEF(VT);
- // Validate that all indices in Mask are within the range of the elements
+ // Validate that all indices in Mask are within the range of the elements
// input to the shuffle.
unsigned NElts = VT.getVectorNumElements();
SmallVector<int, 8> MaskVec;
@@ -1197,18 +1183,18 @@ SDValue SelectionDAG::getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1,
assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
MaskVec.push_back(Mask[i]);
}
-
+
// Canonicalize shuffle v, v -> v, undef
if (N1 == N2) {
N2 = getUNDEF(VT);
for (unsigned i = 0; i != NElts; ++i)
if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
}
-
+
// Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
if (N1.getOpcode() == ISD::UNDEF)
commuteShuffle(N1, N2, MaskVec);
-
+
// Canonicalize all index into lhs, -> shuffle lhs, undef
// Canonicalize all index into rhs, -> shuffle rhs, undef
bool AllLHS = true, AllRHS = true;
@@ -1231,7 +1217,7 @@ SDValue SelectionDAG::getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1,
N1 = getUNDEF(VT);
commuteShuffle(N1, N2, MaskVec);
}
-
+
// If Identity shuffle, or all shuffle in to undef, return that node.
bool AllUndef = true;
bool Identity = true;
@@ -1239,7 +1225,7 @@ SDValue SelectionDAG::getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1,
if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
if (MaskVec[i] >= 0) AllUndef = false;
}
- if (Identity)
+ if (Identity && NElts == N1.getValueType().getVectorNumElements())
return N1;
if (AllUndef)
return getUNDEF(VT);
@@ -1249,17 +1235,17 @@ SDValue SelectionDAG::getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1,
AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
for (unsigned i = 0; i != NElts; ++i)
ID.AddInteger(MaskVec[i]);
-
+
void* IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
-
+
// Allocate the mask array for the node out of the BumpPtrAllocator, since
// SDNode doesn't have access to it. This memory will be "leaked" when
// the node is deallocated, but recovered when the NodeAllocator is released.
int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
-
+
ShuffleVectorSDNode *N = NodeAllocator.Allocate<ShuffleVectorSDNode>();
new (N) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc);
CSEMap.InsertNode(N, IP);
@@ -1267,7 +1253,7 @@ SDValue SelectionDAG::getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1,
return SDValue(N, 0);
}
-SDValue SelectionDAG::getConvertRndSat(MVT VT, DebugLoc dl,
+SDValue SelectionDAG::getConvertRndSat(EVT VT, DebugLoc dl,
SDValue Val, SDValue DTy,
SDValue STy, SDValue Rnd, SDValue Sat,
ISD::CvtCode Code) {
@@ -1289,7 +1275,7 @@ SDValue SelectionDAG::getConvertRndSat(MVT VT, DebugLoc dl,
return SDValue(N, 0);
}
-SDValue SelectionDAG::getRegister(unsigned RegNo, MVT VT) {
+SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
ID.AddInteger(RegNo);
@@ -1305,7 +1291,7 @@ SDValue SelectionDAG::getRegister(unsigned RegNo, MVT VT) {
SDValue SelectionDAG::getDbgStopPoint(DebugLoc DL, SDValue Root,
unsigned Line, unsigned Col,
- Value *CU) {
+ MDNode *CU) {
SDNode *N = NodeAllocator.Allocate<DbgStopPointSDNode>();
new (N) DbgStopPointSDNode(Root, Line, Col, CU);
N->setDebugLoc(DL);
@@ -1349,32 +1335,10 @@ SDValue SelectionDAG::getSrcValue(const Value *V) {
return SDValue(N, 0);
}
-SDValue SelectionDAG::getMemOperand(const MachineMemOperand &MO) {
-#ifndef NDEBUG
- const Value *v = MO.getValue();
- assert((!v || isa<PointerType>(v->getType())) &&
- "SrcValue is not a pointer?");
-#endif
-
- FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::MEMOPERAND, getVTList(MVT::Other), 0, 0);
- MO.Profile(ID);
-
- void *IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
- return SDValue(E, 0);
-
- SDNode *N = NodeAllocator.Allocate<MemOperandSDNode>();
- new (N) MemOperandSDNode(MO);
- CSEMap.InsertNode(N, IP);
- AllNodes.push_back(N);
- return SDValue(N, 0);
-}
-
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(SDValue Op) {
- MVT OpTy = Op.getValueType();
+ EVT OpTy = Op.getValueType();
MVT ShTy = TLI.getShiftAmountTy();
if (OpTy == ShTy || OpTy.isVector()) return Op;
@@ -1384,10 +1348,10 @@ SDValue SelectionDAG::getShiftAmountOperand(SDValue Op) {
/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
-SDValue SelectionDAG::CreateStackTemporary(MVT VT, unsigned minAlign) {
+SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
- unsigned ByteSize = VT.getStoreSizeInBits()/8;
- const Type *Ty = VT.getTypeForMVT();
+ unsigned ByteSize = VT.getStoreSize();
+ const Type *Ty = VT.getTypeForEVT(*getContext());
unsigned StackAlign =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);
@@ -1397,11 +1361,11 @@ SDValue SelectionDAG::CreateStackTemporary(MVT VT, unsigned minAlign) {
/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
-SDValue SelectionDAG::CreateStackTemporary(MVT VT1, MVT VT2) {
+SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
VT2.getStoreSizeInBits())/8;
- const Type *Ty1 = VT1.getTypeForMVT();
- const Type *Ty2 = VT2.getTypeForMVT();
+ const Type *Ty1 = VT1.getTypeForEVT(*getContext());
+ const Type *Ty2 = VT2.getTypeForEVT(*getContext());
const TargetData *TD = TLI.getTargetData();
unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
TD->getPrefTypeAlignment(Ty2));
@@ -1411,7 +1375,7 @@ SDValue SelectionDAG::CreateStackTemporary(MVT VT1, MVT VT2) {
return getFrameIndex(FrameIdx, TLI.getPointerTy());
}
-SDValue SelectionDAG::FoldSetCC(MVT VT, SDValue N1,
+SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
SDValue N2, ISD::CondCode Cond, DebugLoc dl) {
// These setcc operations always fold.
switch (Cond) {
@@ -1441,7 +1405,7 @@ SDValue SelectionDAG::FoldSetCC(MVT VT, SDValue N1,
const APInt &C1 = N1C->getAPIntValue();
switch (Cond) {
- default: assert(0 && "Unknown integer setcc!");
+ default: llvm_unreachable("Unknown integer setcc!");
case ISD::SETEQ: return getConstant(C1 == C2, VT);
case ISD::SETNE: return getConstant(C1 != C2, VT);
case ISD::SETULT: return getConstant(C1.ult(C2), VT);
@@ -1516,6 +1480,10 @@ SDValue SelectionDAG::FoldSetCC(MVT VT, SDValue N1,
/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
/// use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
+ // This predicate is not safe for vector operations.
+ if (Op.getValueType().isVector())
+ return false;
+
unsigned BitWidth = Op.getValueSizeInBits();
return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}
@@ -1743,7 +1711,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
}
return;
case ISD::SIGN_EXTEND_INREG: {
- MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
unsigned EBits = EVT.getSizeInBits();
// Sign extension. Compute the demanded bits in the result that are not
@@ -1788,14 +1756,14 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
case ISD::LOAD: {
if (ISD::isZEXTLoad(Op.getNode())) {
LoadSDNode *LD = cast<LoadSDNode>(Op);
- MVT VT = LD->getMemoryVT();
+ EVT VT = LD->getMemoryVT();
unsigned MemBits = VT.getSizeInBits();
KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
}
return;
}
case ISD::ZERO_EXTEND: {
- MVT InVT = Op.getOperand(0).getValueType();
+ EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getSizeInBits();
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
APInt InMask = Mask;
@@ -1809,7 +1777,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
return;
}
case ISD::SIGN_EXTEND: {
- MVT InVT = Op.getOperand(0).getValueType();
+ EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getSizeInBits();
APInt InSignBit = APInt::getSignBit(InBits);
APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
@@ -1850,7 +1818,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
return;
}
case ISD::ANY_EXTEND: {
- MVT InVT = Op.getOperand(0).getValueType();
+ EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getSizeInBits();
APInt InMask = Mask;
InMask.trunc(InBits);
@@ -1862,7 +1830,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
return;
}
case ISD::TRUNCATE: {
- MVT InVT = Op.getOperand(0).getValueType();
+ EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getSizeInBits();
APInt InMask = Mask;
InMask.zext(InBits);
@@ -1875,7 +1843,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
break;
}
case ISD::AssertZext: {
- MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero,
KnownOne, Depth+1);
@@ -1981,7 +1949,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
- TLI.computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne, *this);
+ TLI.computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne, *this,
+ Depth);
}
return;
}
@@ -1993,7 +1962,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
/// information. For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
assert(VT.isInteger() && "Invalid VT!");
unsigned VTBits = VT.getSizeInBits();
unsigned Tmp, Tmp2;
@@ -2212,6 +2181,19 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}
+bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
+ // If we're told that NaNs won't happen, assume they won't.
+ if (FiniteOnlyFPMath())
+ return true;
+
+ // If the value is a constant, we can obviously see if it is a NaN or not.
+ if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
+ return !C->getValueAPF().isNaN();
+
+ // TODO: Recognize more cases here.
+
+ return false;
+}
bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
@@ -2228,7 +2210,7 @@ bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
/// element of the result of the vector shuffle.
SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
unsigned i) {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
if (N->getMaskElt(i) < 0)
return getUNDEF(VT.getVectorElementType());
@@ -2239,7 +2221,7 @@ SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
if (V.getOpcode() == ISD::BIT_CONVERT) {
V = V.getOperand(0);
- MVT VVT = V.getValueType();
+ EVT VVT = V.getValueType();
if (!VVT.isVector() || VVT.getVectorNumElements() != (unsigned)NumElems)
return SDValue();
}
@@ -2256,7 +2238,7 @@ SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
/// getNode - Gets or creates the specified node.
///
-SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT) {
+SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
void *IP = 0;
@@ -2274,7 +2256,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT) {
}
SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
- MVT VT, SDValue Operand) {
+ EVT VT, SDValue Operand) {
// Constant fold unary operations with an integer constant operand.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
const APInt &Val = C->getAPIntValue();
@@ -2332,7 +2314,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
bool ignored;
// This can return overflow, underflow, or inexact; we don't care.
// FIXME need to be more flexible about rounding mode.
- (void)V.convert(*MVTToAPFloatSemantics(VT),
+ (void)V.convert(*EVTToAPFloatSemantics(VT),
APFloat::rmNearestTiesToEven, &ignored);
return getConstantFP(V, VT);
}
@@ -2366,7 +2348,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
case ISD::MERGE_VALUES:
case ISD::CONCAT_VECTORS:
return Operand; // Factor, merge or concat of one node? No need.
- case ISD::FP_ROUND: assert(0 && "Invalid method to make FP_ROUND node");
+ case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
case ISD::FP_EXTEND:
assert(VT.isFloatingPoint() &&
Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
@@ -2487,7 +2469,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
}
SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode,
- MVT VT,
+ EVT VT,
ConstantSDNode *Cst1,
ConstantSDNode *Cst2) {
const APInt &C1 = Cst1->getAPIntValue(), &C2 = Cst2->getAPIntValue();
@@ -2522,7 +2504,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode,
return SDValue();
}
-SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
SDValue N1, SDValue N2) {
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
@@ -2624,7 +2606,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
return N1;
break;
case ISD::FP_ROUND_INREG: {
- MVT EVT = cast<VTSDNode>(N2)->getVT();
+ EVT EVT = cast<VTSDNode>(N2)->getVT();
assert(VT == N1.getValueType() && "Not an inreg round!");
assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
"Cannot FP_ROUND_INREG integer types");
@@ -2641,7 +2623,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
break;
case ISD::AssertSext:
case ISD::AssertZext: {
- MVT EVT = cast<VTSDNode>(N2)->getVT();
+ EVT EVT = cast<VTSDNode>(N2)->getVT();
assert(VT == N1.getValueType() && "Not an inreg extend!");
assert(VT.isInteger() && EVT.isInteger() &&
"Cannot *_EXTEND_INREG FP types");
@@ -2650,7 +2632,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
break;
}
case ISD::SIGN_EXTEND_INREG: {
- MVT EVT = cast<VTSDNode>(N2)->getVT();
+ EVT EVT = cast<VTSDNode>(N2)->getVT();
assert(VT == N1.getValueType() && "Not an inreg extend!");
assert(VT.isInteger() && EVT.isInteger() &&
"Cannot *_EXTEND_INREG FP types");
@@ -2688,13 +2670,16 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
// expanding large vector constants.
if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
SDValue Elt = N1.getOperand(N2C->getZExtValue());
- if (Elt.getValueType() != VT) {
+ EVT VEltTy = N1.getValueType().getVectorElementType();
+ if (Elt.getValueType() != VEltTy) {
// If the vector element type is not legal, the BUILD_VECTOR operands
// are promoted and implicitly truncated. Make that explicit here.
- assert(VT.isInteger() && Elt.getValueType().isInteger() &&
- VT.bitsLE(Elt.getValueType()) &&
- "Bad type for BUILD_VECTOR operand");
- Elt = getNode(ISD::TRUNCATE, DL, VT, Elt);
+ Elt = getNode(ISD::TRUNCATE, DL, VEltTy, Elt);
+ }
+ if (VT != VEltTy) {
+ // If the vector element type is not legal, the EXTRACT_VECTOR_ELT
+ // result is implicitly extended.
+ Elt = getNode(ISD::ANY_EXTEND, DL, VT, Elt);
}
return Elt;
}
@@ -2895,7 +2880,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
return SDValue(N, 0);
}
-SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3) {
// Perform various simplifications.
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
@@ -2938,7 +2923,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
}
break;
case ISD::VECTOR_SHUFFLE:
- assert(0 && "should use getVectorShuffle constructor!");
+ llvm_unreachable("should use getVectorShuffle constructor!");
break;
case ISD::BIT_CONVERT:
// Fold bit_convert nodes from a type to themselves.
@@ -2971,23 +2956,46 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
return SDValue(N, 0);
}
-SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4) {
SDValue Ops[] = { N1, N2, N3, N4 };
return getNode(Opcode, DL, VT, Ops, 4);
}
-SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3,
SDValue N4, SDValue N5) {
SDValue Ops[] = { N1, N2, N3, N4, N5 };
return getNode(Opcode, DL, VT, Ops, 5);
}
+/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
+/// the incoming stack arguments to be loaded from the stack.
+SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
+ SmallVector<SDValue, 8> ArgChains;
+
+ // Include the original chain at the beginning of the list. When this is
+ // used by target LowerCall hooks, this helps legalize find the
+ // CALLSEQ_BEGIN node.
+ ArgChains.push_back(Chain);
+
+ // Add a chain value for each stack argument.
+ for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
+ UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
+ if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
+ if (FI->getIndex() < 0)
+ ArgChains.push_back(SDValue(L, 1));
+
+ // Build a tokenfactor for all the chains.
+ return getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
+ &ArgChains[0], ArgChains.size());
+}
+
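getStackArgumentTokenFactor is a new helper aimed at target LowerCall hooks, as its comment notes. A rough sketch of how a backend might use it when lowering a tail call; the surrounding variables (Chain, DAG, isTailCall) are assumed to exist in the hypothetical caller:

  // Inside a hypothetical XXXTargetLowering::LowerCall, before emitting the
  // outgoing argument stores for a tail call:
  if (isTailCall)
    // Force loads of incoming (negative frame index) stack arguments to be
    // ordered on this chain before any outgoing stores clobber those slots.
    Chain = DAG.getStackArgumentTokenFactor(Chain);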
/// getMemsetValue - Vectorized representation of the memset value
/// operand.
-static SDValue getMemsetValue(SDValue Value, MVT VT, SelectionDAG &DAG,
+static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
DebugLoc dl) {
unsigned NumBits = VT.isVector() ?
VT.getVectorElementType().getSizeInBits() : VT.getSizeInBits();
@@ -3021,9 +3029,9 @@ static SDValue getMemsetValue(SDValue Value, MVT VT, SelectionDAG &DAG,
/// getMemsetStringVal - Similar to getMemsetValue. Except this is only
/// used when a memcpy is turned into a memset when the source is a constant
/// string ptr.
-static SDValue getMemsetStringVal(MVT VT, DebugLoc dl, SelectionDAG &DAG,
- const TargetLowering &TLI,
- std::string &Str, unsigned Offset) {
+static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
+ const TargetLowering &TLI,
+ std::string &Str, unsigned Offset) {
// Handle vector with all elements zero.
if (Str.empty()) {
if (VT.isInteger())
@@ -3031,7 +3039,8 @@ static SDValue getMemsetStringVal(MVT VT, DebugLoc dl, SelectionDAG &DAG,
unsigned NumElts = VT.getVectorNumElements();
MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
- DAG.getConstant(0, MVT::getVectorVT(EltVT, NumElts)));
+ DAG.getConstant(0,
+ EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts)));
}
assert(!VT.isVector() && "Can't handle vector type here!");
@@ -3051,7 +3060,7 @@ static SDValue getMemsetStringVal(MVT VT, DebugLoc dl, SelectionDAG &DAG,
///
static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset,
SelectionDAG &DAG) {
- MVT VT = Base.getValueType();
+ EVT VT = Base.getValueType();
return DAG.getNode(ISD::ADD, Base.getDebugLoc(),
VT, Base, DAG.getConstant(Offset, VT));
}
@@ -3083,7 +3092,7 @@ static bool isMemSrcFromString(SDValue Src, std::string &Str) {
/// to replace the memset / memcpy is below the threshold. It also returns the
/// types of the sequence of memory ops to perform memset / memcpy.
static
-bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps,
+bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
SDValue Dst, SDValue Src,
unsigned Limit, uint64_t Size, unsigned &Align,
std::string &Str, bool &isSrcStr,
@@ -3091,11 +3100,11 @@ bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps,
const TargetLowering &TLI) {
isSrcStr = isMemSrcFromString(Src, Str);
bool isSrcConst = isa<ConstantSDNode>(Src);
- bool AllowUnalign = TLI.allowsUnalignedMemoryAccesses();
- MVT VT = TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr, DAG);
+ EVT VT = TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr, DAG);
+ bool AllowUnalign = TLI.allowsUnalignedMemoryAccesses(VT);
if (VT != MVT::iAny) {
- unsigned NewAlign = (unsigned)
- TLI.getTargetData()->getABITypeAlignment(VT.getTypeForMVT());
+ const Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+ unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
// If source is a string constant, this will require an unaligned load.
if (NewAlign > Align && (isSrcConst || AllowUnalign)) {
if (Dst.getOpcode() != ISD::FrameIndex) {
@@ -3120,7 +3129,7 @@ bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps,
}
if (VT == MVT::iAny) {
- if (AllowUnalign) {
+ if (TLI.allowsUnalignedMemoryAccesses(MVT::i64)) {
VT = MVT::i64;
} else {
switch (Align & 7) {
@@ -3133,7 +3142,7 @@ bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps,
MVT LVT = MVT::i64;
while (!TLI.isTypeLegal(LVT))
- LVT = (MVT::SimpleValueType)(LVT.getSimpleVT() - 1);
+ LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
assert(LVT.isInteger());
if (VT.bitsGT(LVT))
@@ -3148,12 +3157,12 @@ bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps,
if (VT.isVector()) {
VT = MVT::i64;
while (!TLI.isTypeLegal(VT))
- VT = (MVT::SimpleValueType)(VT.getSimpleVT() - 1);
+ VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
VTSize = VT.getSizeInBits() / 8;
} else {
// This can result in a type that is not legal on the target, e.g.
// 1 or 2 bytes on PPC.
- VT = (MVT::SimpleValueType)(VT.getSimpleVT() - 1);
+ VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
VTSize >>= 1;
}
}
@@ -3177,7 +3186,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
// Expand memcpy to a series of load and store ops if the size operand falls
// below a certain threshold.
- std::vector<MVT> MemOps;
+ std::vector<EVT> MemOps;
uint64_t Limit = -1ULL;
if (!AlwaysInline)
Limit = TLI.getMaxStoresPerMemcpy();
@@ -3193,8 +3202,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
SmallVector<SDValue, 8> OutChains;
unsigned NumMemOps = MemOps.size();
uint64_t SrcOff = 0, DstOff = 0;
- for (unsigned i = 0; i < NumMemOps; i++) {
- MVT VT = MemOps[i];
+ for (unsigned i = 0; i != NumMemOps; ++i) {
+ EVT VT = MemOps[i];
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Value, Store;
@@ -3214,7 +3223,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
// thing to do is generate a LoadExt/StoreTrunc pair. These simplify
// to Load/Store if NVT==VT.
// FIXME does the case above also need this?
- MVT NVT = TLI.getTypeToTransformTo(VT);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
assert(NVT.bitsGE(VT));
Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
@@ -3242,7 +3251,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
// Expand memmove to a series of load and store ops if the size operand falls
// below a certain threshold.
- std::vector<MVT> MemOps;
+ std::vector<EVT> MemOps;
uint64_t Limit = -1ULL;
if (!AlwaysInline)
Limit = TLI.getMaxStoresPerMemmove();
@@ -3260,7 +3269,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
SmallVector<SDValue, 8> OutChains;
unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i < NumMemOps; i++) {
- MVT VT = MemOps[i];
+ EVT VT = MemOps[i];
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Value, Store;
@@ -3275,7 +3284,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
&LoadChains[0], LoadChains.size());
OutChains.clear();
for (unsigned i = 0; i < NumMemOps; i++) {
- MVT VT = MemOps[i];
+ EVT VT = MemOps[i];
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Value, Store;
@@ -3299,7 +3308,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
// Expand memset to a series of load/store ops if the size operand
// falls below a certain threshold.
- std::vector<MVT> MemOps;
+ std::vector<EVT> MemOps;
std::string Str;
bool CopyFromStr;
if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, TLI.getMaxStoresPerMemset(),
@@ -3311,7 +3320,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i < NumMemOps; i++) {
- MVT VT = MemOps[i];
+ EVT VT = MemOps[i];
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Value = getMemsetValue(Src, VT, DAG, dl);
SDValue Store = DAG.getStore(Chain, dl, Value,
@@ -3368,15 +3377,18 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI.getTargetData()->getIntPtrType();
+ Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in DebugLoc
std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(Chain, Type::VoidTy,
- false, false, false, false, 0, CallingConv::C, false,
- getExternalSymbol("memcpy", TLI.getPointerTy()),
+ TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
+ false, false, false, false, 0,
+ TLI.getLibcallCallingConv(RTLIB::MEMCPY), false,
+ /*isReturnValueUsed=*/false,
+ getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
+ TLI.getPointerTy()),
Args, *this, dl);
return CallResult.second;
}
@@ -3414,15 +3426,18 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI.getTargetData()->getIntPtrType();
+ Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
// FIXME: pass in DebugLoc
std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(Chain, Type::VoidTy,
- false, false, false, false, 0, CallingConv::C, false,
- getExternalSymbol("memmove", TLI.getPointerTy()),
+ TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
+ false, false, false, false, 0,
+ TLI.getLibcallCallingConv(RTLIB::MEMMOVE), false,
+ /*isReturnValueUsed=*/false,
+ getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
+ TLI.getPointerTy()),
Args, *this, dl);
return CallResult.second;
}
@@ -3456,7 +3471,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
return Result;
// Emit a library call.
- const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType();
+ const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
@@ -3466,31 +3481,61 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
else
Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
- Entry.Node = Src; Entry.Ty = Type::Int32Ty; Entry.isSExt = true;
+ Entry.Node = Src;
+ Entry.Ty = Type::getInt32Ty(*getContext());
+ Entry.isSExt = true;
Args.push_back(Entry);
- Entry.Node = Size; Entry.Ty = IntPtrTy; Entry.isSExt = false;
+ Entry.Node = Size;
+ Entry.Ty = IntPtrTy;
+ Entry.isSExt = false;
Args.push_back(Entry);
// FIXME: pass in DebugLoc
std::pair<SDValue,SDValue> CallResult =
- TLI.LowerCallTo(Chain, Type::VoidTy,
- false, false, false, false, 0, CallingConv::C, false,
- getExternalSymbol("memset", TLI.getPointerTy()),
+ TLI.LowerCallTo(Chain, Type::getVoidTy(*getContext()),
+ false, false, false, false, 0,
+ TLI.getLibcallCallingConv(RTLIB::MEMSET), false,
+ /*isReturnValueUsed=*/false,
+ getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
+ TLI.getPointerTy()),
Args, *this, dl);
return CallResult.second;
}
-SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT,
+SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
SDValue Chain,
SDValue Ptr, SDValue Cmp,
SDValue Swp, const Value* PtrVal,
unsigned Alignment) {
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0
+ Alignment = getEVTAlignment(MemVT);
+
+ // Check if the memory reference references a frame index
+ if (!PtrVal)
+ if (const FrameIndexSDNode *FI =
+ dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
+ PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
+
+ MachineFunction &MF = getMachineFunction();
+ unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+
+ // For now, atomics are considered to be volatile always.
+ Flags |= MachineMemOperand::MOVolatile;
+
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(PtrVal, Flags, 0,
+ MemVT.getStoreSize(), Alignment);
+
+ return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO);
+}
+
+SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
+ SDValue Chain,
+ SDValue Ptr, SDValue Cmp,
+ SDValue Swp, MachineMemOperand *MMO) {
assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
- MVT VT = Cmp.getValueType();
-
- if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = getMVTAlignment(MemVT);
+ EVT VT = Cmp.getValueType();
SDVTList VTs = getVTList(VT, MVT::Other);
FoldingSetNodeID ID;
@@ -3498,21 +3543,48 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT,
SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
void* IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
+ cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
+ }
SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
- new (N) AtomicSDNode(Opcode, dl, VTs, MemVT,
- Chain, Ptr, Cmp, Swp, PtrVal, Alignment);
+ new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Cmp, Swp, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
-SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT,
+SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
SDValue Chain,
SDValue Ptr, SDValue Val,
const Value* PtrVal,
unsigned Alignment) {
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0
+ Alignment = getEVTAlignment(MemVT);
+
+ // Check if the memory reference references a frame index
+ if (!PtrVal)
+ if (const FrameIndexSDNode *FI =
+ dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
+ PtrVal = PseudoSourceValue::getFixedStack(FI->getIndex());
+
+ MachineFunction &MF = getMachineFunction();
+ unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+
+ // For now, atomics are considered to be volatile always.
+ Flags |= MachineMemOperand::MOVolatile;
+
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(PtrVal, Flags, 0,
+ MemVT.getStoreSize(), Alignment);
+
+ return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
+}
+
+SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
+ SDValue Chain,
+ SDValue Ptr, SDValue Val,
+ MachineMemOperand *MMO) {
assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
Opcode == ISD::ATOMIC_LOAD_SUB ||
Opcode == ISD::ATOMIC_LOAD_AND ||
@@ -3526,10 +3598,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT,
Opcode == ISD::ATOMIC_SWAP) &&
"Invalid Atomic Op");
- MVT VT = Val.getValueType();
-
- if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = getMVTAlignment(MemVT);
+ EVT VT = Val.getValueType();
SDVTList VTs = getVTList(VT, MVT::Other);
FoldingSetNodeID ID;
@@ -3537,11 +3606,12 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT,
SDValue Ops[] = {Chain, Ptr, Val};
AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
void* IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
+ cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
+ }
SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
- new (N) AtomicSDNode(Opcode, dl, VTs, MemVT,
- Chain, Ptr, Val, PtrVal, Alignment);
+ new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Val, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -3554,7 +3624,7 @@ SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
if (NumOps == 1)
return Ops[0];
- SmallVector<MVT, 4> VTs;
+ SmallVector<EVT, 4> VTs;
VTs.reserve(NumOps);
for (unsigned i = 0; i < NumOps; ++i)
VTs.push_back(Ops[i].getValueType());
@@ -3564,9 +3634,9 @@ SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
- const MVT *VTs, unsigned NumVTs,
+ const EVT *VTs, unsigned NumVTs,
const SDValue *Ops, unsigned NumOps,
- MVT MemVT, const Value *srcValue, int SVOff,
+ EVT MemVT, const Value *srcValue, int SVOff,
unsigned Align, bool Vol,
bool ReadMem, bool WriteMem) {
return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
@@ -3577,81 +3647,104 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
SDValue
SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
const SDValue *Ops, unsigned NumOps,
- MVT MemVT, const Value *srcValue, int SVOff,
+ EVT MemVT, const Value *srcValue, int SVOff,
unsigned Align, bool Vol,
bool ReadMem, bool WriteMem) {
+ if (Align == 0) // Ensure that codegen never sees alignment 0
+ Align = getEVTAlignment(MemVT);
+
+ MachineFunction &MF = getMachineFunction();
+ unsigned Flags = 0;
+ if (WriteMem)
+ Flags |= MachineMemOperand::MOStore;
+ if (ReadMem)
+ Flags |= MachineMemOperand::MOLoad;
+ if (Vol)
+ Flags |= MachineMemOperand::MOVolatile;
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(srcValue, Flags, SVOff,
+ MemVT.getStoreSize(), Align);
+
+ return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
+}
+
+SDValue
+SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
+ const SDValue *Ops, unsigned NumOps,
+ EVT MemVT, MachineMemOperand *MMO) {
+ assert((Opcode == ISD::INTRINSIC_VOID ||
+ Opcode == ISD::INTRINSIC_W_CHAIN ||
+ (Opcode <= INT_MAX &&
+ (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
+ "Opcode is not a memory-accessing opcode!");
+
// Memoize the node unless it returns a flag.
MemIntrinsicSDNode *N;
if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
void *IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
+ cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
+ }
N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
- new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT,
- srcValue, SVOff, Align, Vol, ReadMem, WriteMem);
+ new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
CSEMap.InsertNode(N, IP);
} else {
N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
- new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT,
- srcValue, SVOff, Align, Vol, ReadMem, WriteMem);
- }
- AllNodes.push_back(N);
- return SDValue(N, 0);
-}
-
-SDValue
-SelectionDAG::getCall(unsigned CallingConv, DebugLoc dl, bool IsVarArgs,
- bool IsTailCall, bool IsInreg, SDVTList VTs,
- const SDValue *Operands, unsigned NumOperands,
- unsigned NumFixedArgs) {
- // Do not include isTailCall in the folding set profile.
- FoldingSetNodeID ID;
- AddNodeIDNode(ID, ISD::CALL, VTs, Operands, NumOperands);
- ID.AddInteger(CallingConv);
- ID.AddInteger(IsVarArgs);
- void *IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
- // Instead of including isTailCall in the folding set, we just
- // set the flag of the existing node.
- if (!IsTailCall)
- cast<CallSDNode>(E)->setNotTailCall();
- return SDValue(E, 0);
+ new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
}
- SDNode *N = NodeAllocator.Allocate<CallSDNode>();
- new (N) CallSDNode(CallingConv, dl, IsVarArgs, IsTailCall, IsInreg,
- VTs, Operands, NumOperands, NumFixedArgs);
- CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
- ISD::LoadExtType ExtType, MVT VT, SDValue Chain,
+ ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
SDValue Ptr, SDValue Offset,
- const Value *SV, int SVOffset, MVT EVT,
+ const Value *SV, int SVOffset, EVT MemVT,
bool isVolatile, unsigned Alignment) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = getMVTAlignment(VT);
+ Alignment = getEVTAlignment(VT);
+
+ // Check if the memory reference references a frame index
+ if (!SV)
+ if (const FrameIndexSDNode *FI =
+ dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
+ SV = PseudoSourceValue::getFixedStack(FI->getIndex());
+
+ MachineFunction &MF = getMachineFunction();
+ unsigned Flags = MachineMemOperand::MOLoad;
+ if (isVolatile)
+ Flags |= MachineMemOperand::MOVolatile;
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(SV, Flags, SVOffset,
+ MemVT.getStoreSize(), Alignment);
+ return getLoad(AM, dl, ExtType, VT, Chain, Ptr, Offset, MemVT, MMO);
+}
- if (VT == EVT) {
+SDValue
+SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
+ ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
+ SDValue Ptr, SDValue Offset, EVT MemVT,
+ MachineMemOperand *MMO) {
+ if (VT == MemVT) {
ExtType = ISD::NON_EXTLOAD;
} else if (ExtType == ISD::NON_EXTLOAD) {
- assert(VT == EVT && "Non-extending load from different memory type!");
+ assert(VT == MemVT && "Non-extending load from different memory type!");
} else {
// Extending load.
if (VT.isVector())
- assert(EVT.getVectorNumElements() == VT.getVectorNumElements() &&
+ assert(MemVT.getVectorNumElements() == VT.getVectorNumElements() &&
"Invalid vector extload!");
else
- assert(EVT.bitsLT(VT) &&
+ assert(MemVT.bitsLT(VT) &&
"Should only be an extending load, not truncating!");
assert((ExtType == ISD::EXTLOAD || VT.isInteger()) &&
"Cannot sign/zero extend a FP/Vector load!");
- assert(VT.isInteger() == EVT.isInteger() &&
+ assert(VT.isInteger() == MemVT.isInteger() &&
"Cannot convert from FP to Int or Int -> FP!");
}
@@ -3664,20 +3757,21 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
SDValue Ops[] = { Chain, Ptr, Offset };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
- ID.AddInteger(EVT.getRawBits());
- ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, isVolatile, Alignment));
+ ID.AddInteger(MemVT.getRawBits());
+ ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile()));
void *IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
+ cast<LoadSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
+ }
SDNode *N = NodeAllocator.Allocate<LoadSDNode>();
- new (N) LoadSDNode(Ops, dl, VTs, AM, ExtType, EVT, SV, SVOffset,
- Alignment, isVolatile);
+ new (N) LoadSDNode(Ops, dl, VTs, AM, ExtType, MemVT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
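The new getLoad overload above takes a MachineMemOperand directly instead of a SrcValue/offset/alignment tuple. A sketch of building one explicitly, assuming SV, Offset, Chain, Ptr, and dl already exist in the caller; the size and alignment values are illustrative:

  // Create a volatile i32 load with an explicitly constructed MachineMemOperand.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(SV,
                            MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile,
                            Offset, /*Size=*/4, /*Alignment=*/4);
  SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
  SDValue Load = DAG.getLoad(ISD::UNINDEXED, dl, ISD::NON_EXTLOAD, MVT::i32,
                             Chain, Ptr, Undef, MVT::i32, MMO);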
-SDValue SelectionDAG::getLoad(MVT VT, DebugLoc dl,
+SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
const Value *SV, int SVOffset,
bool isVolatile, unsigned Alignment) {
@@ -3686,14 +3780,14 @@ SDValue SelectionDAG::getLoad(MVT VT, DebugLoc dl,
SV, SVOffset, VT, isVolatile, Alignment);
}
-SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, MVT VT,
+SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
SDValue Chain, SDValue Ptr,
const Value *SV,
- int SVOffset, MVT EVT,
+ int SVOffset, EVT MemVT,
bool isVolatile, unsigned Alignment) {
SDValue Undef = getUNDEF(Ptr.getValueType());
return getLoad(ISD::UNINDEXED, dl, ExtType, VT, Chain, Ptr, Undef,
- SV, SVOffset, EVT, isVolatile, Alignment);
+ SV, SVOffset, MemVT, isVolatile, Alignment);
}
SDValue
@@ -3711,25 +3805,43 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue Ptr, const Value *SV, int SVOffset,
bool isVolatile, unsigned Alignment) {
- MVT VT = Val.getValueType();
-
if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = getMVTAlignment(VT);
+ Alignment = getEVTAlignment(Val.getValueType());
+ // Check if the memory reference references a frame index
+ if (!SV)
+ if (const FrameIndexSDNode *FI =
+ dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
+ SV = PseudoSourceValue::getFixedStack(FI->getIndex());
+
+ MachineFunction &MF = getMachineFunction();
+ unsigned Flags = MachineMemOperand::MOStore;
+ if (isVolatile)
+ Flags |= MachineMemOperand::MOVolatile;
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(SV, Flags, SVOffset,
+ Val.getValueType().getStoreSize(), Alignment);
+
+ return getStore(Chain, dl, Val, Ptr, MMO);
+}
+
+SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
+ SDValue Ptr, MachineMemOperand *MMO) {
+ EVT VT = Val.getValueType();
SDVTList VTs = getVTList(MVT::Other);
SDValue Undef = getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
ID.AddInteger(VT.getRawBits());
- ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED,
- isVolatile, Alignment));
+ ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile()));
void *IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
+ cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
+ }
SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
- new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, false,
- VT, SV, SVOffset, Alignment, isVolatile);
+ new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, false, VT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -3737,19 +3849,39 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
SDValue Ptr, const Value *SV,
- int SVOffset, MVT SVT,
+ int SVOffset, EVT SVT,
bool isVolatile, unsigned Alignment) {
- MVT VT = Val.getValueType();
+ if (Alignment == 0) // Ensure that codegen never sees alignment 0
+ Alignment = getEVTAlignment(SVT);
+
+ // Check if the memory reference references a frame index
+ if (!SV)
+ if (const FrameIndexSDNode *FI =
+ dyn_cast<const FrameIndexSDNode>(Ptr.getNode()))
+ SV = PseudoSourceValue::getFixedStack(FI->getIndex());
+
+ MachineFunction &MF = getMachineFunction();
+ unsigned Flags = MachineMemOperand::MOStore;
+ if (isVolatile)
+ Flags |= MachineMemOperand::MOVolatile;
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(SV, Flags, SVOffset, SVT.getStoreSize(), Alignment);
+
+ return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
+}
+
+SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
+ SDValue Ptr, EVT SVT,
+ MachineMemOperand *MMO) {
+ EVT VT = Val.getValueType();
if (VT == SVT)
- return getStore(Chain, dl, Val, Ptr, SV, SVOffset, isVolatile, Alignment);
+ return getStore(Chain, dl, Val, Ptr, MMO);
assert(VT.bitsGT(SVT) && "Not a truncation?");
assert(VT.isInteger() == SVT.isInteger() &&
"Can't do FP-INT conversion!");
- if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = getMVTAlignment(VT);
SDVTList VTs = getVTList(MVT::Other);
SDValue Undef = getUNDEF(Ptr.getValueType());
@@ -3757,14 +3889,14 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
ID.AddInteger(SVT.getRawBits());
- ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED,
- isVolatile, Alignment));
+ ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile()));
void *IP = 0;
- if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
+ cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
+ }
SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
- new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, true,
- SVT, SV, SVOffset, Alignment, isVolatile);
+ new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, true, SVT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -3788,21 +3920,20 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
new (N) StoreSDNode(Ops, dl, VTs, AM,
ST->isTruncatingStore(), ST->getMemoryVT(),
- ST->getSrcValue(), ST->getSrcValueOffset(),
- ST->getAlignment(), ST->isVolatile());
+ ST->getMemOperand());
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
-SDValue SelectionDAG::getVAArg(MVT VT, DebugLoc dl,
+SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
SDValue SV) {
SDValue Ops[] = { Chain, Ptr, SV };
return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 3);
}
-SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
const SDUse *Ops, unsigned NumOps) {
switch (NumOps) {
case 0: return getNode(Opcode, DL, VT);
@@ -3818,7 +3949,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
}
-SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
const SDValue *Ops, unsigned NumOps) {
switch (NumOps) {
case 0: return getNode(Opcode, DL, VT);
@@ -3876,14 +4007,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, MVT VT,
}
SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
- const std::vector<MVT> &ResultTys,
+ const std::vector<EVT> &ResultTys,
const SDValue *Ops, unsigned NumOps) {
return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
Ops, NumOps);
}
SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
- const MVT *VTs, unsigned NumVTs,
+ const EVT *VTs, unsigned NumVTs,
const SDValue *Ops, unsigned NumOps) {
if (NumVTs == 1)
return getNode(Opcode, DL, VTs[0], Ops, NumOps);
@@ -3895,11 +4026,11 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
if (VTList.NumVTs == 1)
return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
+#if 0
switch (Opcode) {
// FIXME: figure out how to safely handle things like
// int foo(int x) { return 1 << (x & 255); }
// int bar() { return foo(256); }
-#if 0
case ISD::SRA_PARTS:
case ISD::SRL_PARTS:
case ISD::SHL_PARTS:
@@ -3915,8 +4046,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
}
break;
-#endif
}
+#endif
// Memoize the node unless it returns a flag.
SDNode *N;
@@ -3998,17 +4129,17 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
return getNode(Opcode, DL, VTList, Ops, 5);
}
-SDVTList SelectionDAG::getVTList(MVT VT) {
+SDVTList SelectionDAG::getVTList(EVT VT) {
return makeVTList(SDNode::getValueTypeList(VT), 1);
}
-SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2) {
+SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
E = VTList.rend(); I != E; ++I)
if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2)
return *I;
- MVT *Array = Allocator.Allocate<MVT>(2);
+ EVT *Array = Allocator.Allocate<EVT>(2);
Array[0] = VT1;
Array[1] = VT2;
SDVTList Result = makeVTList(Array, 2);
@@ -4016,14 +4147,14 @@ SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2) {
return Result;
}
-SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2, MVT VT3) {
+SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
E = VTList.rend(); I != E; ++I)
if (I->NumVTs == 3 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
I->VTs[2] == VT3)
return *I;
- MVT *Array = Allocator.Allocate<MVT>(3);
+ EVT *Array = Allocator.Allocate<EVT>(3);
Array[0] = VT1;
Array[1] = VT2;
Array[2] = VT3;
@@ -4032,14 +4163,14 @@ SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2, MVT VT3) {
return Result;
}
-SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2, MVT VT3, MVT VT4) {
+SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
E = VTList.rend(); I != E; ++I)
if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
I->VTs[2] == VT3 && I->VTs[3] == VT4)
return *I;
- MVT *Array = Allocator.Allocate<MVT>(3);
+  EVT *Array = Allocator.Allocate<EVT>(4);
Array[0] = VT1;
Array[1] = VT2;
Array[2] = VT3;
@@ -4049,9 +4180,9 @@ SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2, MVT VT3, MVT VT4) {
return Result;
}
-SDVTList SelectionDAG::getVTList(const MVT *VTs, unsigned NumVTs) {
+SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
switch (NumVTs) {
- case 0: assert(0 && "Cannot have nodes without results!");
+ case 0: llvm_unreachable("Cannot have nodes without results!");
case 1: return getVTList(VTs[0]);
case 2: return getVTList(VTs[0], VTs[1]);
case 3: return getVTList(VTs[0], VTs[1], VTs[2]);
@@ -4073,7 +4204,7 @@ SDVTList SelectionDAG::getVTList(const MVT *VTs, unsigned NumVTs) {
return *I;
}
- MVT *Array = Allocator.Allocate<MVT>(NumVTs);
+ EVT *Array = Allocator.Allocate<EVT>(NumVTs);
std::copy(VTs, VTs+NumVTs, Array);
SDVTList Result = makeVTList(Array, NumVTs);
VTList.push_back(Result);
@@ -4215,20 +4346,20 @@ void SDNode::DropOperands() {
/// machine opcode.
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT) {
+ EVT VT) {
SDVTList VTs = getVTList(VT);
return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT, SDValue Op1) {
+ EVT VT, SDValue Op1) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1 };
return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT, SDValue Op1,
+ EVT VT, SDValue Op1,
SDValue Op2) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2 };
@@ -4236,7 +4367,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT, SDValue Op1,
+ EVT VT, SDValue Op1,
SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2, Op3 };
@@ -4244,41 +4375,41 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT, const SDValue *Ops,
+ EVT VT, const SDValue *Ops,
unsigned NumOps) {
SDVTList VTs = getVTList(VT);
return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT1, MVT VT2, const SDValue *Ops,
+ EVT VT1, EVT VT2, const SDValue *Ops,
unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2);
return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT1, MVT VT2) {
+ EVT VT1, EVT VT2) {
SDVTList VTs = getVTList(VT1, VT2);
return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT1, MVT VT2, MVT VT3,
+ EVT VT1, EVT VT2, EVT VT3,
const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT1, MVT VT2, MVT VT3, MVT VT4,
+ EVT VT1, EVT VT2, EVT VT3, EVT VT4,
const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT1, MVT VT2,
+ EVT VT1, EVT VT2,
SDValue Op1) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1 };
@@ -4286,7 +4417,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT1, MVT VT2,
+ EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2 };
@@ -4294,7 +4425,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT1, MVT VT2,
+ EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2,
SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2);
@@ -4303,7 +4434,7 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
}
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
- MVT VT1, MVT VT2, MVT VT3,
+ EVT VT1, EVT VT2, EVT VT3,
SDValue Op1, SDValue Op2,
SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
@@ -4318,20 +4449,20 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT) {
+ EVT VT) {
SDVTList VTs = getVTList(VT);
return MorphNodeTo(N, Opc, VTs, 0, 0);
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT, SDValue Op1) {
+ EVT VT, SDValue Op1) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1 };
return MorphNodeTo(N, Opc, VTs, Ops, 1);
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT, SDValue Op1,
+ EVT VT, SDValue Op1,
SDValue Op2) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2 };
@@ -4339,7 +4470,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT, SDValue Op1,
+ EVT VT, SDValue Op1,
SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT);
SDValue Ops[] = { Op1, Op2, Op3 };
@@ -4347,34 +4478,34 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT, const SDValue *Ops,
+ EVT VT, const SDValue *Ops,
unsigned NumOps) {
SDVTList VTs = getVTList(VT);
return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT1, MVT VT2, const SDValue *Ops,
+ EVT VT1, EVT VT2, const SDValue *Ops,
unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2);
return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT1, MVT VT2) {
+ EVT VT1, EVT VT2) {
SDVTList VTs = getVTList(VT1, VT2);
return MorphNodeTo(N, Opc, VTs, (SDValue *)0, 0);
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT1, MVT VT2, MVT VT3,
+ EVT VT1, EVT VT2, EVT VT3,
const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
return MorphNodeTo(N, Opc, VTs, Ops, NumOps);
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT1, MVT VT2,
+ EVT VT1, EVT VT2,
SDValue Op1) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1 };
@@ -4382,7 +4513,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT1, MVT VT2,
+ EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2 };
@@ -4390,7 +4521,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
}
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
- MVT VT1, MVT VT2,
+ EVT VT1, EVT VT2,
SDValue Op1, SDValue Op2,
SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2);
@@ -4441,29 +4572,35 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
DeadNodeSet.insert(Used);
}
- // If NumOps is larger than the # of operands we currently have, reallocate
- // the operand list.
- if (NumOps > N->NumOperands) {
- if (N->OperandsNeedDelete)
- delete[] N->OperandList;
-
- if (N->isMachineOpcode()) {
- // We're creating a final node that will live unmorphed for the
- // remainder of the current SelectionDAG iteration, so we can allocate
- // the operands directly out of a pool with no recycling metadata.
- N->OperandList = OperandAllocator.Allocate<SDUse>(NumOps);
- N->OperandsNeedDelete = false;
- } else {
- N->OperandList = new SDUse[NumOps];
+ if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
+ // Initialize the memory references information.
+ MN->setMemRefs(0, 0);
+ // If NumOps is larger than the # of operands we can have in a
+ // MachineSDNode, reallocate the operand list.
+ if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
+ if (MN->OperandsNeedDelete)
+ delete[] MN->OperandList;
+ if (NumOps > array_lengthof(MN->LocalOperands))
+ // We're creating a final node that will live unmorphed for the
+ // remainder of the current SelectionDAG iteration, so we can allocate
+ // the operands directly out of a pool with no recycling metadata.
+ MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
+ Ops, NumOps);
+ else
+ MN->InitOperands(MN->LocalOperands, Ops, NumOps);
+ MN->OperandsNeedDelete = false;
+ } else
+ MN->InitOperands(MN->OperandList, Ops, NumOps);
+ } else {
+ // If NumOps is larger than the # of operands we currently have, reallocate
+ // the operand list.
+ if (NumOps > N->NumOperands) {
+ if (N->OperandsNeedDelete)
+ delete[] N->OperandList;
+ N->InitOperands(new SDUse[NumOps], Ops, NumOps);
N->OperandsNeedDelete = true;
- }
- }
-
- // Assign the new operands.
- N->NumOperands = NumOps;
- for (unsigned i = 0, e = NumOps; i != e; ++i) {
- N->OperandList[i].setUser(N);
- N->OperandList[i].setInitial(Ops[i]);
+ } else
+      N->InitOperands(N->OperandList, Ops, NumOps);
}
// Delete any nodes that are still dead after adding the uses for the
@@ -4481,108 +4618,189 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
}
-/// getTargetNode - These are used for target selectors to create a new node
-/// with specified return type(s), target opcode, and operands.
+/// getMachineNode - These are used for target selectors to create a new node
+/// with specified return type(s), MachineInstr opcode, and operands.
///
-/// Note that getTargetNode returns the resultant node. If there is already a
+/// Note that getMachineNode returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT) {
- return getNode(~Opcode, dl, VT).getNode();
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT) {
+ SDVTList VTs = getVTList(VT);
+ return getMachineNode(Opcode, dl, VTs, 0, 0);
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
- SDValue Op1) {
- return getNode(~Opcode, dl, VT, Op1).getNode();
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT, SDValue Op1) {
+ SDVTList VTs = getVTList(VT);
+ SDValue Ops[] = { Op1 };
+ return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
- SDValue Op1, SDValue Op2) {
- return getNode(~Opcode, dl, VT, Op1, Op2).getNode();
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
+ SDValue Op1, SDValue Op2) {
+ SDVTList VTs = getVTList(VT);
+ SDValue Ops[] = { Op1, Op2 };
+ return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
- SDValue Op1, SDValue Op2,
- SDValue Op3) {
- return getNode(~Opcode, dl, VT, Op1, Op2, Op3).getNode();
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
+ SDValue Op1, SDValue Op2, SDValue Op3) {
+ SDVTList VTs = getVTList(VT);
+ SDValue Ops[] = { Op1, Op2, Op3 };
+ return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
- const SDValue *Ops, unsigned NumOps) {
- return getNode(~Opcode, dl, VT, Ops, NumOps).getNode();
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
+ const SDValue *Ops, unsigned NumOps) {
+ SDVTList VTs = getVTList(VT);
+ return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
- MVT VT1, MVT VT2) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2) {
SDVTList VTs = getVTList(VT1, VT2);
- SDValue Op;
- return getNode(~Opcode, dl, VTs, &Op, 0).getNode();
+ return getMachineNode(Opcode, dl, VTs, 0, 0);
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
- MVT VT2, SDValue Op1) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
+ EVT VT1, EVT VT2, SDValue Op1) {
SDVTList VTs = getVTList(VT1, VT2);
- return getNode(~Opcode, dl, VTs, &Op1, 1).getNode();
+ SDValue Ops[] = { Op1 };
+ return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
- MVT VT2, SDValue Op1,
- SDValue Op2) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
+ EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2 };
- return getNode(~Opcode, dl, VTs, Ops, 2).getNode();
+ return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
- MVT VT2, SDValue Op1,
- SDValue Op2, SDValue Op3) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
+ EVT VT1, EVT VT2, SDValue Op1,
+ SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2);
SDValue Ops[] = { Op1, Op2, Op3 };
- return getNode(~Opcode, dl, VTs, Ops, 3).getNode();
+ return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
- MVT VT1, MVT VT2,
- const SDValue *Ops, unsigned NumOps) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
+ EVT VT1, EVT VT2,
+ const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2);
- return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
+ return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
- MVT VT1, MVT VT2, MVT VT3,
- SDValue Op1, SDValue Op2) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
+ EVT VT1, EVT VT2, EVT VT3,
+ SDValue Op1, SDValue Op2) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
SDValue Ops[] = { Op1, Op2 };
- return getNode(~Opcode, dl, VTs, Ops, 2).getNode();
+ return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
- MVT VT1, MVT VT2, MVT VT3,
- SDValue Op1, SDValue Op2,
- SDValue Op3) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
+ EVT VT1, EVT VT2, EVT VT3,
+ SDValue Op1, SDValue Op2, SDValue Op3) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
SDValue Ops[] = { Op1, Op2, Op3 };
- return getNode(~Opcode, dl, VTs, Ops, 3).getNode();
+ return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
- MVT VT1, MVT VT2, MVT VT3,
- const SDValue *Ops, unsigned NumOps) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
+ EVT VT1, EVT VT2, EVT VT3,
+ const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2, VT3);
- return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
+ return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
- MVT VT2, MVT VT3, MVT VT4,
- const SDValue *Ops, unsigned NumOps) {
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
+ EVT VT2, EVT VT3, EVT VT4,
+ const SDValue *Ops, unsigned NumOps) {
SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
- return getNode(~Opcode, dl, VTs, Ops, NumOps).getNode();
+ return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
}
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
- const std::vector<MVT> &ResultTys,
- const SDValue *Ops, unsigned NumOps) {
- return getNode(~Opcode, dl, ResultTys, Ops, NumOps).getNode();
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
+ const std::vector<EVT> &ResultTys,
+ const SDValue *Ops, unsigned NumOps) {
+ SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
+ return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
+}
+
+MachineSDNode *
+SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
+ const SDValue *Ops, unsigned NumOps) {
+ bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Flag;
+ MachineSDNode *N;
+ void *IP;
+
+ if (DoCSE) {
+ FoldingSetNodeID ID;
+ AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
+ IP = 0;
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ return cast<MachineSDNode>(E);
+ }
+
+ // Allocate a new MachineSDNode.
+ N = NodeAllocator.Allocate<MachineSDNode>();
+ new (N) MachineSDNode(~Opcode, DL, VTs);
+
+ // Initialize the operands list.
+ if (NumOps > array_lengthof(N->LocalOperands))
+ // We're creating a final node that will live unmorphed for the
+ // remainder of the current SelectionDAG iteration, so we can allocate
+ // the operands directly out of a pool with no recycling metadata.
+ N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
+ Ops, NumOps);
+ else
+ N->InitOperands(N->LocalOperands, Ops, NumOps);
+ N->OperandsNeedDelete = false;
+
+ if (DoCSE)
+ CSEMap.InsertNode(N, IP);
+
+ AllNodes.push_back(N);
+#ifndef NDEBUG
+ VerifyNode(N);
+#endif
+ return N;
+}
+
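The getTargetNode family is replaced by getMachineNode, which returns a MachineSDNode* so memory references can be attached to the node. A sketch of how an instruction selector might migrate, using a made-up target opcode XXX::LDri and an MMO built elsewhere; allocateMemRefsArray is assumed to be the companion MachineFunction API for allocating the memref array:

  // Before (hypothetical): SDNode *Ld = CurDAG->getTargetNode(XXX::LDri, dl,
  //                            MVT::i32, MVT::Other, Base, Offset, Chain);
  // After: the MachineSDNode result can also carry MachineMemOperands.
  MachineSDNode *Ld =
    CurDAG->getMachineNode(XXX::LDri, dl, MVT::i32, MVT::Other,
                           Base, Offset, Chain);
  MachineSDNode::mmo_iterator MemOp =
    CurDAG->getMachineFunction().allocateMemRefsArray(1);
  MemOp[0] = MMO;                       // a MachineMemOperand* built elsewhere
  Ld->setMemRefs(MemOp, MemOp + 1);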
+/// getTargetExtractSubreg - A convenience function for creating
+/// TargetInstrInfo::EXTRACT_SUBREG nodes.
+SDValue
+SelectionDAG::getTargetExtractSubreg(int SRIdx, DebugLoc DL, EVT VT,
+ SDValue Operand) {
+ SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
+ SDNode *Subreg = getMachineNode(TargetInstrInfo::EXTRACT_SUBREG, DL,
+ VT, Operand, SRIdxVal);
+ return SDValue(Subreg, 0);
+}
+
+/// getTargetInsertSubreg - A convenience function for creating
+/// TargetInstrInfo::INSERT_SUBREG nodes.
+SDValue
+SelectionDAG::getTargetInsertSubreg(int SRIdx, DebugLoc DL, EVT VT,
+ SDValue Operand, SDValue Subreg) {
+ SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
+ SDNode *Result = getMachineNode(TargetInstrInfo::INSERT_SUBREG, DL,
+ VT, Operand, Subreg, SRIdxVal);
+ return SDValue(Result, 0);
}
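A short usage sketch for the two convenience wrappers above; the sub-register index XXX::sub_32bit and the values Val64/NewLo32 are hypothetical, since subreg indices are target-specific:

  // Extract the low 32 bits of a 64-bit value, then reinsert a new low half.
  SDValue Lo  = DAG.getTargetExtractSubreg(XXX::sub_32bit, dl, MVT::i32, Val64);
  SDValue New = DAG.getTargetInsertSubreg(XXX::sub_32bit, dl, MVT::i64,
                                          Val64, NewLo32);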
/// getNodeIfExists - Get the specified node if it's already available, or
@@ -4937,64 +5155,28 @@ HandleSDNode::~HandleSDNode() {
}
GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, const GlobalValue *GA,
- MVT VT, int64_t o, unsigned char TF)
+ EVT VT, int64_t o, unsigned char TF)
: SDNode(Opc, DebugLoc::getUnknownLoc(), getSDVTList(VT)),
Offset(o), TargetFlags(TF) {
TheGlobal = const_cast<GlobalValue*>(GA);
}
-MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, MVT memvt,
- const Value *srcValue, int SVO,
- unsigned alignment, bool vol)
- : SDNode(Opc, dl, VTs), MemoryVT(memvt), SrcValue(srcValue), SVOffset(SVO) {
- SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, vol, alignment);
- assert(isPowerOf2_32(alignment) && "Alignment is not a power of 2!");
- assert(getAlignment() == alignment && "Alignment representation error!");
- assert(isVolatile() == vol && "Volatile representation error!");
+MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
+ MachineMemOperand *mmo)
+ : SDNode(Opc, dl, VTs), MemoryVT(memvt), MMO(mmo) {
+ SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile());
+ assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
+ assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}
MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
- const SDValue *Ops,
- unsigned NumOps, MVT memvt, const Value *srcValue,
- int SVO, unsigned alignment, bool vol)
+ const SDValue *Ops, unsigned NumOps, EVT memvt,
+ MachineMemOperand *mmo)
: SDNode(Opc, dl, VTs, Ops, NumOps),
- MemoryVT(memvt), SrcValue(srcValue), SVOffset(SVO) {
- SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, vol, alignment);
- assert(isPowerOf2_32(alignment) && "Alignment is not a power of 2!");
- assert(getAlignment() == alignment && "Alignment representation error!");
- assert(isVolatile() == vol && "Volatile representation error!");
-}
-
-/// getMemOperand - Return a MachineMemOperand object describing the memory
-/// reference performed by this memory reference.
-MachineMemOperand MemSDNode::getMemOperand() const {
- int Flags = 0;
- if (isa<LoadSDNode>(this))
- Flags = MachineMemOperand::MOLoad;
- else if (isa<StoreSDNode>(this))
- Flags = MachineMemOperand::MOStore;
- else if (isa<AtomicSDNode>(this)) {
- Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
- }
- else {
- const MemIntrinsicSDNode* MemIntrinNode = dyn_cast<MemIntrinsicSDNode>(this);
- assert(MemIntrinNode && "Unknown MemSDNode opcode!");
- if (MemIntrinNode->readMem()) Flags |= MachineMemOperand::MOLoad;
- if (MemIntrinNode->writeMem()) Flags |= MachineMemOperand::MOStore;
- }
-
- int Size = (getMemoryVT().getSizeInBits() + 7) >> 3;
- if (isVolatile()) Flags |= MachineMemOperand::MOVolatile;
-
- // Check if the memory reference references a frame index
- const FrameIndexSDNode *FI =
- dyn_cast<const FrameIndexSDNode>(getBasePtr().getNode());
- if (!getSrcValue() && FI)
- return MachineMemOperand(PseudoSourceValue::getFixedStack(FI->getIndex()),
- Flags, 0, Size, getAlignment());
- else
- return MachineMemOperand(getSrcValue(), Flags, getSrcValueOffset(),
- Size, getAlignment());
+ MemoryVT(memvt), MMO(mmo) {
+ SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile());
+ assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
+ assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
}
/// Profile - Gather unique data for the node.
@@ -5003,19 +5185,30 @@ void SDNode::Profile(FoldingSetNodeID &ID) const {
AddNodeIDNode(ID, this);
}
-static ManagedStatic<std::set<MVT, MVT::compareRawBits> > EVTs;
-static MVT VTs[MVT::LAST_VALUETYPE];
+namespace {
+ struct EVTArray {
+ std::vector<EVT> VTs;
+
+ EVTArray() {
+ VTs.reserve(MVT::LAST_VALUETYPE);
+ for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
+ VTs.push_back(MVT((MVT::SimpleValueType)i));
+ }
+ };
+}
+
+static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
+static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true> > VTMutex;
/// getValueTypeList - Return a pointer to the specified value type.
///
-const MVT *SDNode::getValueTypeList(MVT VT) {
- sys::SmartScopedLock<true> Lock(&*VTMutex);
+const EVT *SDNode::getValueTypeList(EVT VT) {
if (VT.isExtended()) {
+ sys::SmartScopedLock<true> Lock(*VTMutex);
return &(*EVTs->insert(VT).first);
} else {
- VTs[VT.getSimpleVT()] = VT;
- return &VTs[VT.getSimpleVT()];
+ return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
}
}
@@ -5186,14 +5379,12 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::PCMARKER: return "PCMarker";
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
case ISD::SRCVALUE: return "SrcValue";
- case ISD::MEMOPERAND: return "MemOperand";
case ISD::EntryToken: return "EntryToken";
case ISD::TokenFactor: return "TokenFactor";
case ISD::AssertSext: return "AssertSext";
case ISD::AssertZext: return "AssertZext";
case ISD::BasicBlock: return "BasicBlock";
- case ISD::ARG_FLAGS: return "ArgFlags";
case ISD::VALUETYPE: return "ValueType";
case ISD::Register: return "Register";
@@ -5208,6 +5399,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::FRAMEADDR: return "FRAMEADDR";
case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
case ISD::EXCEPTIONADDR: return "EXCEPTIONADDR";
+ case ISD::LSDAADDR: return "LSDAADDR";
case ISD::EHSELECTION: return "EHSELECTION";
case ISD::EH_RETURN: return "EH_RETURN";
case ISD::ConstantPool: return "ConstantPool";
@@ -5239,10 +5431,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::INLINEASM: return "inlineasm";
case ISD::DBG_LABEL: return "dbg_label";
case ISD::EH_LABEL: return "eh_label";
- case ISD::DECLARE: return "declare";
case ISD::HANDLENODE: return "handlenode";
- case ISD::FORMAL_ARGUMENTS: return "formal_arguments";
- case ISD::CALL: return "call";
// Unary operators
case ISD::FABS: return "fabs";
@@ -5332,7 +5521,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::CONVERT_RNDSAT: {
switch (cast<CvtRndSatSDNode>(this)->getCvtCode()) {
- default: assert(0 && "Unknown cvt code!");
+ default: llvm_unreachable("Unknown cvt code!");
case ISD::CVT_FF: return "cvt_ff";
case ISD::CVT_FS: return "cvt_fs";
case ISD::CVT_FU: return "cvt_fu";
@@ -5351,7 +5540,6 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::BR_JT: return "br_jt";
case ISD::BRCOND: return "brcond";
case ISD::BR_CC: return "br_cc";
- case ISD::RET: return "ret";
case ISD::CALLSEQ_START: return "callseq_start";
case ISD::CALLSEQ_END: return "callseq_end";
@@ -5384,7 +5572,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::CONDCODE:
switch (cast<CondCodeSDNode>(this)->get()) {
- default: assert(0 && "Unknown setcc condition!");
+ default: llvm_unreachable("Unknown setcc condition!");
case ISD::SETOEQ: return "setoeq";
case ISD::SETOGT: return "setogt";
case ISD::SETOGE: return "setoge";
@@ -5463,14 +5651,26 @@ void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
if (getValueType(i) == MVT::Other)
OS << "ch";
else
- OS << getValueType(i).getMVTString();
+ OS << getValueType(i).getEVTString();
}
OS << " = " << getOperationName(G);
}
void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
- if (!isTargetOpcode() && getOpcode() == ISD::VECTOR_SHUFFLE) {
- const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(this);
+ if (const MachineSDNode *MN = dyn_cast<MachineSDNode>(this)) {
+ if (!MN->memoperands_empty()) {
+ OS << "<";
+ OS << "Mem:";
+ for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
+ e = MN->memoperands_end(); i != e; ++i) {
+ OS << **i;
+ if (next(i) != e)
+ OS << " ";
+ }
+ OS << ">";
+ }
+ } else if (const ShuffleVectorSDNode *SVN =
+ dyn_cast<ShuffleVectorSDNode>(this)) {
OS << "<";
for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
int Idx = SVN->getMaskElt(i);
@@ -5481,9 +5681,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << Idx;
}
OS << ">";
- }
-
- if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
+ } else if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
OS << '<' << CSDN->getAPIntValue() << '>';
} else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEsingle)
@@ -5505,13 +5703,13 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << " + " << offset;
else
OS << " " << offset;
- if (unsigned char TF = GADN->getTargetFlags())
+ if (unsigned int TF = GADN->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const FrameIndexSDNode *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
OS << "<" << FIDN->getIndex() << ">";
} else if (const JumpTableSDNode *JTDN = dyn_cast<JumpTableSDNode>(this)) {
OS << "<" << JTDN->getIndex() << ">";
- if (unsigned char TF = JTDN->getTargetFlags())
+ if (unsigned int TF = JTDN->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(this)){
int offset = CP->getOffset();
@@ -5523,7 +5721,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << " + " << offset;
else
OS << " " << offset;
- if (unsigned char TF = CP->getTargetFlags())
+ if (unsigned int TF = CP->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
OS << "<";
@@ -5541,80 +5739,47 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
} else if (const ExternalSymbolSDNode *ES =
dyn_cast<ExternalSymbolSDNode>(this)) {
OS << "'" << ES->getSymbol() << "'";
- if (unsigned char TF = ES->getTargetFlags())
+ if (unsigned int TF = ES->getTargetFlags())
OS << " [TF=" << TF << ']';
} else if (const SrcValueSDNode *M = dyn_cast<SrcValueSDNode>(this)) {
if (M->getValue())
OS << "<" << M->getValue() << ">";
else
OS << "<null>";
- } else if (const MemOperandSDNode *M = dyn_cast<MemOperandSDNode>(this)) {
- if (M->MO.getValue())
- OS << "<" << M->MO.getValue() << ":" << M->MO.getOffset() << ">";
- else
- OS << "<null:" << M->MO.getOffset() << ">";
- } else if (const ARG_FLAGSSDNode *N = dyn_cast<ARG_FLAGSSDNode>(this)) {
- OS << N->getArgFlags().getArgFlagsString();
} else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
- OS << ":" << N->getVT().getMVTString();
+ OS << ":" << N->getVT().getEVTString();
}
else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
- const Value *SrcValue = LD->getSrcValue();
- int SrcOffset = LD->getSrcValueOffset();
- OS << " <";
- if (SrcValue)
- OS << SrcValue;
- else
- OS << "null";
- OS << ":" << SrcOffset << ">";
+ OS << " <" << *LD->getMemOperand();
bool doExt = true;
switch (LD->getExtensionType()) {
default: doExt = false; break;
- case ISD::EXTLOAD: OS << " <anyext "; break;
- case ISD::SEXTLOAD: OS << " <sext "; break;
- case ISD::ZEXTLOAD: OS << " <zext "; break;
+ case ISD::EXTLOAD: OS << ", anyext"; break;
+ case ISD::SEXTLOAD: OS << ", sext"; break;
+ case ISD::ZEXTLOAD: OS << ", zext"; break;
}
if (doExt)
- OS << LD->getMemoryVT().getMVTString() << ">";
+ OS << " from " << LD->getMemoryVT().getEVTString();
const char *AM = getIndexedModeName(LD->getAddressingMode());
if (*AM)
- OS << " " << AM;
- if (LD->isVolatile())
- OS << " <volatile>";
- OS << " alignment=" << LD->getAlignment();
+ OS << ", " << AM;
+
+ OS << ">";
} else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
- const Value *SrcValue = ST->getSrcValue();
- int SrcOffset = ST->getSrcValueOffset();
- OS << " <";
- if (SrcValue)
- OS << SrcValue;
- else
- OS << "null";
- OS << ":" << SrcOffset << ">";
+ OS << " <" << *ST->getMemOperand();
if (ST->isTruncatingStore())
- OS << " <trunc " << ST->getMemoryVT().getMVTString() << ">";
+ OS << ", trunc to " << ST->getMemoryVT().getEVTString();
const char *AM = getIndexedModeName(ST->getAddressingMode());
if (*AM)
- OS << " " << AM;
- if (ST->isVolatile())
- OS << " <volatile>";
- OS << " alignment=" << ST->getAlignment();
- } else if (const AtomicSDNode* AT = dyn_cast<AtomicSDNode>(this)) {
- const Value *SrcValue = AT->getSrcValue();
- int SrcOffset = AT->getSrcValueOffset();
- OS << " <";
- if (SrcValue)
- OS << SrcValue;
- else
- OS << "null";
- OS << ":" << SrcOffset << ">";
- if (AT->isVolatile())
- OS << " <volatile>";
- OS << " alignment=" << AT->getAlignment();
+ OS << ", " << AM;
+
+ OS << ">";
+ } else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
+ OS << " <" << *M->getMemOperand() << ">";
}
}
@@ -5635,16 +5800,17 @@ static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
if (N->getOperand(i).getNode()->hasOneUse())
DumpNodes(N->getOperand(i).getNode(), indent+2, G);
else
- cerr << "\n" << std::string(indent+2, ' ')
- << (void*)N->getOperand(i).getNode() << ": <multiple use>";
+ errs() << "\n" << std::string(indent+2, ' ')
+ << (void*)N->getOperand(i).getNode() << ": <multiple use>";
- cerr << "\n" << std::string(indent, ' ');
+ errs() << "\n";
+ errs().indent(indent);
N->dump(G);
}
void SelectionDAG::dump() const {
- cerr << "SelectionDAG has " << AllNodes.size() << " nodes:";
+ errs() << "SelectionDAG has " << AllNodes.size() << " nodes:";
for (allnodes_const_iterator I = allnodes_begin(), E = allnodes_end();
I != E; ++I) {
@@ -5655,7 +5821,7 @@ void SelectionDAG::dump() const {
if (getRoot().getNode()) DumpNodes(getRoot().getNode(), 2, this);
- cerr << "\n\n";
+ errs() << "\n\n";
}
void SDNode::printr(raw_ostream &OS, const SelectionDAG *G) const {
@@ -5699,6 +5865,11 @@ void SDNode::dumpr() const {
DumpNodesr(errs(), this, 0, 0, once);
}
+void SDNode::dumpr(const SelectionDAG *G) const {
+ VisitedSDNodeSet once;
+ DumpNodesr(errs(), this, 0, G, once);
+}
+
// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
@@ -5717,7 +5888,7 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
unsigned &SplatBitSize,
bool &HasAnyUndefs,
unsigned MinSplatBits) {
- MVT VT = getValueType(0);
+ EVT VT = getValueType(0);
assert(VT.isVector() && "Expected a vector type");
unsigned sz = VT.getSizeInBits();
if (MinSplatBits > sz)
@@ -5767,7 +5938,7 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
SplatValue = HighValue | LowValue;
SplatUndef = HighUndef & LowUndef;
-
+
sz = HalfSize;
}
@@ -5775,14 +5946,14 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
return true;
}
-bool ShuffleVectorSDNode::isSplatMask(const int *Mask, MVT VT) {
+bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
// Find the first non-undef value in the shuffle mask.
unsigned i, e;
for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
/* search */;
assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
-
+
// Make sure all remaining elements are either undef or the same as the first
// non-undef value.
for (int Idx = Mask[i]; i != e; ++i)
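
As a sanity check on the splat-mask test above, here is a standalone rendering of the same logic (illustrative only, not the LLVM code): skip leading undef entries, encoded as -1, then require every remaining entry to be undef or equal to the first defined one.

#include <cassert>
#include <cstddef>

bool isSplatMask(const int *Mask, size_t NumElts) {
  size_t i = 0;
  while (i != NumElts && Mask[i] < 0)
    ++i;                                    // find the first non-undef element
  assert(i != NumElts && "shuffle mask with all-undef indices");
  int Splat = Mask[i];
  for (; i != NumElts; ++i)
    if (Mask[i] >= 0 && Mask[i] != Splat)   // undef entries are still allowed
      return false;
  return true;
}

int main() {
  int A[] = {-1, 2, 2, -1};   // splat of element 2
  int B[] = {-1, 2, 3, 2};    // not a splat
  assert(isSplatMask(A, 4) && !isSplatMask(B, 4));
  return 0;
}
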
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
index 260911e..9017e43 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Constants.h"
+#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
@@ -49,6 +50,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -104,14 +106,14 @@ static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
}
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
-/// MVTs that represent all the individual underlying
+/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
- SmallVectorImpl<MVT> &ValueVTs,
+ SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets = 0,
uint64_t StartingOffset = 0) {
// Given a struct type, recursively traverse the elements.
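
The intent of the EVT-ified ComputeValueVTs is unchanged from the MVT version: walk an aggregate type recursively and record only its leaf types together with their in-memory offsets. A minimal standalone sketch of that traversal follows, using a made-up Ty record instead of LLVM's Type and TargetLowering; the void and array special cases of the real routine are omitted.

#include <cstdint>
#include <vector>

// Made-up stand-in for an IR type: non-empty Members means an aggregate.
struct Ty {
  std::vector<const Ty *> Members;
  std::vector<uint64_t> MemberOffsets;   // byte offset of each member
};

void flattenType(const Ty *T, std::vector<const Ty *> &Leaves,
                 std::vector<uint64_t> *Offsets, uint64_t Start = 0) {
  if (!T->Members.empty()) {
    // Aggregate: recurse into each member at its accumulated offset.
    for (size_t i = 0, e = T->Members.size(); i != e; ++i)
      flattenType(T->Members[i], Leaves, Offsets, Start + T->MemberOffsets[i]);
    return;
  }
  // Base case: a leaf type that can be mapped directly to a value type.
  Leaves.push_back(T);
  if (Offsets)
    Offsets->push_back(Start);
}
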
@@ -135,9 +137,9 @@ static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
return;
}
// Interpret void as zero return values.
- if (Ty == Type::VoidTy)
+ if (Ty == Type::getVoidTy(Ty->getContext()))
return;
- // Base case: we can get an MVT for this LLVM IR type.
+ // Base case: we can get an EVT for this LLVM IR type.
ValueVTs.push_back(TLI.getValueType(Ty));
if (Offsets)
Offsets->push_back(StartingOffset);
@@ -161,7 +163,7 @@ namespace llvm {
/// ValueVTs - The value types of the values, which may not be legal, and
/// may need be promoted or synthesized from one or more registers.
///
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
/// RegVTs - The value types of the registers. This is the same size as
/// ValueVTs and it records, for each value, what the type of the assigned
@@ -172,7 +174,7 @@ namespace llvm {
/// getRegisterType member function, however when with physical registers
/// it is necessary to have a separate record of the types.
///
- SmallVector<MVT, 4> RegVTs;
+ SmallVector<EVT, 4> RegVTs;
/// Regs - This list holds the registers assigned to the values.
/// Each legal or promoted value requires one register, and each
@@ -184,21 +186,21 @@ namespace llvm {
RegsForValue(const TargetLowering &tli,
const SmallVector<unsigned, 4> &regs,
- MVT regvt, MVT valuevt)
+ EVT regvt, EVT valuevt)
: TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
RegsForValue(const TargetLowering &tli,
const SmallVector<unsigned, 4> &regs,
- const SmallVector<MVT, 4> &regvts,
- const SmallVector<MVT, 4> &valuevts)
+ const SmallVector<EVT, 4> &regvts,
+ const SmallVector<EVT, 4> &valuevts)
: TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
- RegsForValue(const TargetLowering &tli,
+ RegsForValue(LLVMContext &Context, const TargetLowering &tli,
unsigned Reg, const Type *Ty) : TLI(&tli) {
ComputeValueVTs(tli, Ty, ValueVTs);
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
- MVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = TLI->getNumRegisters(ValueVT);
- MVT RegisterVT = TLI->getRegisterType(ValueVT);
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
+ EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
RegVTs.push_back(RegisterVT);
@@ -352,11 +354,11 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
unsigned PHIReg = ValueMap[PN];
assert(PHIReg && "PHI node does not have an assigned virtual register!");
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
- MVT VT = ValueVTs[vti];
- unsigned NumRegisters = TLI.getNumRegisters(VT);
+ EVT VT = ValueVTs[vti];
+ unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
for (unsigned i = 0; i != NumRegisters; ++i)
BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
@@ -366,7 +368,7 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
}
}
-unsigned FunctionLoweringInfo::MakeReg(MVT VT) {
+unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}
@@ -378,15 +380,15 @@ unsigned FunctionLoweringInfo::MakeReg(MVT VT) {
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, V->getType(), ValueVTs);
unsigned FirstReg = 0;
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
- MVT ValueVT = ValueVTs[Value];
- MVT RegisterVT = TLI.getRegisterType(ValueVT);
+ EVT ValueVT = ValueVTs[Value];
+ EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);
- unsigned NumRegs = TLI.getNumRegisters(ValueVT);
+ unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
for (unsigned i = 0; i != NumRegs; ++i) {
unsigned R = MakeReg(RegisterVT);
if (!FirstReg) FirstReg = R;
@@ -402,7 +404,7 @@ unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
const SDValue *Parts,
- unsigned NumParts, MVT PartVT, MVT ValueVT,
+ unsigned NumParts, EVT PartVT, EVT ValueVT,
ISD::NodeType AssertOp = ISD::DELETED_NODE) {
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -418,11 +420,11 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
unsigned RoundParts = NumParts & (NumParts - 1) ?
1 << Log2_32(NumParts) : NumParts;
unsigned RoundBits = PartBits * RoundParts;
- MVT RoundVT = RoundBits == ValueBits ?
- ValueVT : MVT::getIntegerVT(RoundBits);
+ EVT RoundVT = RoundBits == ValueBits ?
+ ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
SDValue Lo, Hi;
- MVT HalfVT = MVT::getIntegerVT(RoundBits/2);
+ EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
if (RoundParts > 2) {
Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
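
The RoundParts computation above rounds a non-power-of-two part count down to the largest power of two below it; the leftover "odd" parts are assembled separately and combined afterwards. A quick standalone check of that arithmetic (the helper names here are made up):

#include <cassert>

static unsigned log2Floor(unsigned V) {     // floor(log2(V)), V > 0
  unsigned L = 0;
  while (V >>= 1) ++L;
  return L;
}

static unsigned roundParts(unsigned NumParts) {
  // Not a power of two?  Round down to the largest power of two below it.
  return (NumParts & (NumParts - 1)) ? (1u << log2Floor(NumParts)) : NumParts;
}

int main() {
  assert(roundParts(4) == 4);   // already a power of two
  assert(roundParts(6) == 4);   // 6 parts -> 4 round parts + 2 odd parts
  assert(roundParts(7) == 4);
  return 0;
}
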
@@ -439,7 +441,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
if (RoundParts < NumParts) {
// Assemble the trailing non-power-of-2 part.
unsigned OddParts = NumParts - RoundParts;
- MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
+ EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
Hi = getCopyFromParts(DAG, dl,
Parts+RoundParts, OddParts, PartVT, OddVT);
@@ -447,7 +449,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
Lo = Val;
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
+ EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
DAG.getConstant(Lo.getValueType().getSizeInBits(),
@@ -457,11 +459,11 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
}
} else if (ValueVT.isVector()) {
// Handle a multi-element vector.
- MVT IntermediateVT, RegisterVT;
+ EVT IntermediateVT, RegisterVT;
unsigned NumIntermediates;
unsigned NumRegs =
- TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
- RegisterVT);
+ TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
+ NumIntermediates, RegisterVT);
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
@@ -494,11 +496,11 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
ValueVT, &Ops[0], NumIntermediates);
} else if (PartVT.isFloatingPoint()) {
// FP split into multiple FP parts (for ppcf128)
- assert(ValueVT == MVT(MVT::ppcf128) && PartVT == MVT(MVT::f64) &&
+ assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
"Unexpected split");
SDValue Lo, Hi;
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[0]);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[1]);
+ Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
+ Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
if (TLI.isBigEndian())
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
@@ -506,7 +508,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
// FP split into integer parts (soft fp)
assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
!PartVT.isVector() && "Unexpected split");
- MVT IntVT = MVT::getIntegerVT(ValueVT.getSizeInBits());
+ EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
}
}
@@ -555,7 +557,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
- assert(0 && "Unknown mismatch!");
+ llvm_unreachable("Unknown mismatch!");
return SDValue();
}
@@ -563,11 +565,11 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
- SDValue *Parts, unsigned NumParts, MVT PartVT,
+ SDValue *Parts, unsigned NumParts, EVT PartVT,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- MVT PtrVT = TLI.getPointerTy();
- MVT ValueVT = Val.getValueType();
+ EVT PtrVT = TLI.getPointerTy();
+ EVT ValueVT = Val.getValueType();
unsigned PartBits = PartVT.getSizeInBits();
unsigned OrigNumParts = NumParts;
assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
@@ -588,10 +590,10 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
assert(NumParts == 1 && "Do not know what to promote to!");
Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
} else if (PartVT.isInteger() && ValueVT.isInteger()) {
- ValueVT = MVT::getIntegerVT(NumParts * PartBits);
+ ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
} else {
- assert(0 && "Unknown mismatch!");
+ llvm_unreachable("Unknown mismatch!");
}
} else if (PartBits == ValueVT.getSizeInBits()) {
// Different types of the same size.
@@ -600,10 +602,10 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
} else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
// If the parts cover less bits than value has, truncate the value.
if (PartVT.isInteger() && ValueVT.isInteger()) {
- ValueVT = MVT::getIntegerVT(NumParts * PartBits);
+ ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
} else {
- assert(0 && "Unknown mismatch!");
+ llvm_unreachable("Unknown mismatch!");
}
}
@@ -634,19 +636,19 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
// The odd parts were reversed by getCopyToParts - unreverse them.
std::reverse(Parts + RoundParts, Parts + NumParts);
NumParts = RoundParts;
- ValueVT = MVT::getIntegerVT(NumParts * PartBits);
+ ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
}
// The number of parts is a power of 2. Repeatedly bisect the value using
// EXTRACT_ELEMENT.
Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
- MVT::getIntegerVT(ValueVT.getSizeInBits()),
+ EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()),
Val);
for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
for (unsigned i = 0; i < NumParts; i += StepSize) {
unsigned ThisBits = StepSize * PartBits / 2;
- MVT ThisVT = MVT::getIntegerVT (ThisBits);
+ EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
SDValue &Part0 = Parts[i];
SDValue &Part1 = Parts[i+StepSize/2];
@@ -692,11 +694,10 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
}
// Handle a multi-element vector.
- MVT IntermediateVT, RegisterVT;
+ EVT IntermediateVT, RegisterVT;
unsigned NumIntermediates;
- unsigned NumRegs = TLI
- .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
- RegisterVT);
+ unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
+ IntermediateVT, NumIntermediates, RegisterVT);
unsigned NumElements = ValueVT.getVectorNumElements();
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
@@ -750,8 +751,10 @@ void SelectionDAGLowering::clear() {
NodeMap.clear();
PendingLoads.clear();
PendingExports.clear();
+ EdgeMapping.clear();
DAG.clear();
CurDebugLoc = DebugLoc::getUnknownLoc();
+ HasTailCall = false;
}
/// getRoot - Return the current virtual root of the Selection DAG,
@@ -817,8 +820,7 @@ void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
// Note: this doesn't use InstVisitor, because it has to work with
// ConstantExpr's in addition to instructions.
switch (Opcode) {
- default: assert(0 && "Unknown instruction type encountered!");
- abort();
+ default: llvm_unreachable("Unknown instruction type encountered!");
// Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
@@ -831,7 +833,7 @@ SDValue SelectionDAGLowering::getValue(const Value *V) {
if (N.getNode()) return N;
if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
- MVT VT = TLI.getValueType(V->getType(), true);
+ EVT VT = TLI.getValueType(V->getType(), true);
if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
return N = DAG.getConstant(*CI, VT);
@@ -860,6 +862,10 @@ SDValue SelectionDAGLowering::getValue(const Value *V) {
for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
OI != OE; ++OI) {
SDNode *Val = getValue(*OI).getNode();
+ // If the operand is an empty aggregate, there are no values.
+ if (!Val) continue;
+ // Add each leaf value from the operand to the Constants list
+ // to form a flattened list of all the values.
for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
Constants.push_back(SDValue(Val, i));
}
@@ -871,14 +877,14 @@ SDValue SelectionDAGLowering::getValue(const Value *V) {
assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
"Unknown struct or array constant!");
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, C->getType(), ValueVTs);
unsigned NumElts = ValueVTs.size();
if (NumElts == 0)
return SDValue(); // empty struct
SmallVector<SDValue, 4> Constants(NumElts);
for (unsigned i = 0; i != NumElts; ++i) {
- MVT EltVT = ValueVTs[i];
+ EVT EltVT = ValueVTs[i];
if (isa<UndefValue>(C))
Constants[i] = DAG.getUNDEF(EltVT);
else if (EltVT.isFloatingPoint())
@@ -900,7 +906,7 @@ SDValue SelectionDAGLowering::getValue(const Value *V) {
Ops.push_back(getValue(CP->getOperand(i)));
} else {
assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
- MVT EltVT = TLI.getValueType(VecTy->getElementType());
+ EVT EltVT = TLI.getValueType(VecTy->getElementType());
SDValue Op;
if (EltVT.isFloatingPoint())
@@ -927,30 +933,24 @@ SDValue SelectionDAGLowering::getValue(const Value *V) {
unsigned InReg = FuncInfo.ValueMap[V];
assert(InReg && "Value not in map!");
- RegsForValue RFV(TLI, InReg, V->getType());
+ RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
SDValue Chain = DAG.getEntryNode();
return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
}
void SelectionDAGLowering::visitRet(ReturnInst &I) {
- if (I.getNumOperands() == 0) {
- DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(),
- MVT::Other, getControlRoot()));
- return;
- }
-
- SmallVector<SDValue, 8> NewValues;
- NewValues.push_back(getControlRoot());
+ SDValue Chain = getControlRoot();
+ SmallVector<ISD::OutputArg, 8> Outs;
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0) continue;
SDValue RetOp = getValue(I.getOperand(i));
for (unsigned j = 0, f = NumValues; j != f; ++j) {
- MVT VT = ValueVTs[j];
+ EVT VT = ValueVTs[j];
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
@@ -965,13 +965,13 @@ void SelectionDAGLowering::visitRet(ReturnInst &I) {
// conventions. The frontend should mark functions whose return values
// require promoting with signext or zeroext attributes.
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
- MVT MinVT = TLI.getRegisterType(MVT::i32);
+ EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
if (VT.bitsLT(MinVT))
VT = MinVT;
}
- unsigned NumParts = TLI.getNumRegisters(VT);
- MVT PartVT = TLI.getRegisterType(VT);
+ unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
+ EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
SmallVector<SDValue, 4> Parts(NumParts);
getCopyToParts(DAG, getCurDebugLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
@@ -981,14 +981,30 @@ void SelectionDAGLowering::visitRet(ReturnInst &I) {
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
if (F->paramHasAttr(0, Attribute::InReg))
Flags.setInReg();
- for (unsigned i = 0; i < NumParts; ++i) {
- NewValues.push_back(Parts[i]);
- NewValues.push_back(DAG.getArgFlags(Flags));
- }
+
+ // Propagate extension type if any
+ if (F->paramHasAttr(0, Attribute::SExt))
+ Flags.setSExt();
+ else if (F->paramHasAttr(0, Attribute::ZExt))
+ Flags.setZExt();
+
+ for (unsigned i = 0; i < NumParts; ++i)
+ Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
}
}
- DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), MVT::Other,
- &NewValues[0], NewValues.size()));
+
+ bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
+ CallingConv::ID CallConv =
+ DAG.getMachineFunction().getFunction()->getCallingConv();
+ Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
+ Outs, getCurDebugLoc(), DAG);
+
+ // Verify that the target's LowerReturn behaved as expected.
+ assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
+ "LowerReturn didn't return a valid chain!");
+
+ // Update the DAG with the new chain value resulting from return lowering.
+ DAG.setRoot(Chain);
}
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
@@ -1073,7 +1089,7 @@ static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
default:
- assert(0 && "Invalid FCmp predicate opcode!");
+ llvm_unreachable("Invalid FCmp predicate opcode!");
FOC = FPC = ISD::SETFALSE;
break;
}
@@ -1099,7 +1115,7 @@ static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
case ICmpInst::ICMP_SGT: return ISD::SETGT;
case ICmpInst::ICMP_UGT: return ISD::SETUGT;
default:
- assert(0 && "Invalid ICmp predicate opcode!");
+ llvm_unreachable("Invalid ICmp predicate opcode!");
return ISD::SETNE;
}
}
@@ -1131,7 +1147,7 @@ SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
Condition = getFCmpCondCode(FC->getPredicate());
} else {
Condition = ISD::SETEQ; // silence warning.
- assert(0 && "Unknown compare instruction");
+ llvm_unreachable("Unknown compare instruction");
}
CaseBlock CB(Condition, BOp->getOperand(0),
@@ -1142,7 +1158,7 @@ SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
}
// Create a CaseBlock record representing this branch.
- CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
+ CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
NULL, TBB, FBB, CurBB);
SwitchCases.push_back(CB);
}
@@ -1229,7 +1245,7 @@ void SelectionDAGLowering::visitBr(BranchInst &I) {
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CurMBB;
- if (++BBI != CurMBB->getParent()->end())
+ if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
if (I.isUnconditional()) {
@@ -1290,14 +1306,14 @@ void SelectionDAGLowering::visitBr(BranchInst &I) {
// Okay, we decided not to do this, remove any inserted MBB's and clear
// SwitchCases.
for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
- CurMBB->getParent()->erase(SwitchCases[i].ThisBB);
+ FuncInfo.MF->erase(SwitchCases[i].ThisBB);
SwitchCases.clear();
}
}
// Create a CaseBlock record representing this branch.
- CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
+ CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
NULL, Succ0MBB, Succ1MBB, CurMBB);
// Use visitSwitchCase to actually insert the fast branch sequence for this
// cond branch.
@@ -1315,9 +1331,11 @@ void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
if (CB.CmpMHS == NULL) {
// Fold "(X == true)" to X and "(X == false)" to !X to
// handle common cases produced by branch lowering.
- if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
+ if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
+ CB.CC == ISD::SETEQ)
Cond = CondLHS;
- else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
+ else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
+ CB.CC == ISD::SETEQ) {
SDValue True = DAG.getConstant(1, CondLHS.getValueType());
Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
} else
@@ -1329,7 +1347,7 @@ void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
SDValue CmpOp = getValue(CB.CmpMHS);
- MVT VT = CmpOp.getValueType();
+ EVT VT = CmpOp.getValueType();
if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
@@ -1350,7 +1368,7 @@ void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
// This is used to avoid emitting unnecessary branches to the next block.
MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CurMBB;
- if (++BBI != CurMBB->getParent()->end())
+ if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
// If the lhs block is the next block, invert the condition so that we can
@@ -1385,7 +1403,7 @@ void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
// Emit the code for the jump table
assert(JT.Reg != -1U && "Should lower JT Header first!");
- MVT PTy = TLI.getPointerTy();
+ EVT PTy = TLI.getPointerTy();
SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
JT.Reg, PTy);
SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
@@ -1402,7 +1420,7 @@ void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
// conditional branch to default mbb if the result is greater than the
// difference between smallest and largest cases.
SDValue SwitchOp = getValue(JTH.SValue);
- MVT VT = SwitchOp.getValueType();
+ EVT VT = SwitchOp.getValueType();
SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
DAG.getConstant(JTH.First, VT));
@@ -1411,12 +1429,7 @@ void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
// can be used as an index into the jump table in a subsequent basic block.
// This value may be smaller or larger than the target's pointer type, and
// therefore require extension or truncating.
- if (VT.bitsGT(TLI.getPointerTy()))
- SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
- TLI.getPointerTy(), SUB);
- else
- SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
- TLI.getPointerTy(), SUB);
+ SwitchOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy());
unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
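
Several of these hunks collapse a hand-written truncate-or-zero-extend branch into DAG.getZExtOrTrunc. The decision the old call sites spelled out, and which the helper is assumed to make, is simply the following (illustrative sketch only, not the DAG API):

enum class WidthFixup { Truncate, ZeroExtend };

// Assumed semantics of a getZExtOrTrunc-style helper: truncate when the
// destination is narrower than the source, otherwise zero-extend (which
// folds to a no-op when the widths already match).
WidthFixup pickZExtOrTrunc(unsigned SrcBits, unsigned DstBits) {
  return DstBits < SrcBits ? WidthFixup::Truncate : WidthFixup::ZeroExtend;
}
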
@@ -1435,7 +1448,7 @@ void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
// This is used to avoid emitting unnecessary branches to the next block.
MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CurMBB;
- if (++BBI != CurMBB->getParent()->end())
+ if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
@@ -1454,7 +1467,7 @@ void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
// Subtract the minimum value
SDValue SwitchOp = getValue(B.SValue);
- MVT VT = SwitchOp.getValueType();
+ EVT VT = SwitchOp.getValueType();
SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
DAG.getConstant(B.First, VT));
@@ -1464,13 +1477,7 @@ void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
SUB, DAG.getConstant(B.Range, VT),
ISD::SETUGT);
- SDValue ShiftOp;
- if (VT.bitsGT(TLI.getPointerTy()))
- ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
- TLI.getPointerTy(), SUB);
- else
- ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
- TLI.getPointerTy(), SUB);
+ SDValue ShiftOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy());
B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
@@ -1480,7 +1487,7 @@ void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
// This is used to avoid emitting unnecessary branches to the next block.
MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CurMBB;
- if (++BBI != CurMBB->getParent()->end())
+ if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
MachineBasicBlock* MBB = B.Cases[0].ThisBB;
@@ -1531,7 +1538,7 @@ void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
// This is used to avoid emitting unnecessary branches to the next block.
MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CurMBB;
- if (++BBI != CurMBB->getParent()->end())
+ if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
if (NextMBB == NextBlock)
@@ -1584,13 +1591,13 @@ bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
// Get the MachineFunction which holds the current MBB. This is used when
// inserting any additional MBBs necessary to represent the switch.
- MachineFunction *CurMF = CurMBB->getParent();
+ MachineFunction *CurMF = FuncInfo.MF;
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CR.CaseBB;
- if (++BBI != CurMBB->getParent()->end())
+ if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
// TODO: If any two of the cases has the same destination, and if one value
@@ -1698,14 +1705,11 @@ bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
// Get the MachineFunction which holds the current MBB. This is used when
// inserting any additional MBBs necessary to represent the switch.
- MachineFunction *CurMF = CurMBB->getParent();
+ MachineFunction *CurMF = FuncInfo.MF;
// Figure out which block is immediately after the current one.
- MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CR.CaseBB;
-
- if (++BBI != CurMBB->getParent()->end())
- NextBlock = BBI;
+ ++BBI;
const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
@@ -1771,14 +1775,11 @@ bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
MachineBasicBlock* Default) {
// Get the MachineFunction which holds the current MBB. This is used when
// inserting any additional MBBs necessary to represent the switch.
- MachineFunction *CurMF = CurMBB->getParent();
+ MachineFunction *CurMF = FuncInfo.MF;
// Figure out which block is immediately after the current one.
- MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CR.CaseBB;
-
- if (++BBI != CurMBB->getParent()->end())
- NextBlock = BBI;
+ ++BBI;
Case& FrontCase = *CR.Range.first;
Case& BackCase = *(CR.Range.second-1);
@@ -1898,14 +1899,15 @@ bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
MachineBasicBlock* Default){
- unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();
+ EVT PTy = TLI.getPointerTy();
+ unsigned IntPtrBits = PTy.getSizeInBits();
Case& FrontCase = *CR.Range.first;
Case& BackCase = *(CR.Range.second-1);
// Get the MachineFunction which holds the current MBB. This is used when
// inserting any additional MBBs necessary to represent the switch.
- MachineFunction *CurMF = CurMBB->getParent();
+ MachineFunction *CurMF = FuncInfo.MF;
// If target does not have legal shift left, do not emit bit tests at all.
if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
@@ -2069,7 +2071,6 @@ size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
- MachineFunction::iterator BBI = CurMBB;
MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
@@ -2174,24 +2175,26 @@ void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
if (!isa<VectorType>(I.getType()) &&
Op2.getValueType() != TLI.getShiftAmountTy()) {
// If the operand is smaller than the shift count type, promote it.
- if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
+ EVT PTy = TLI.getPointerTy();
+ EVT STy = TLI.getShiftAmountTy();
+ if (STy.bitsGT(Op2.getValueType()))
Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
TLI.getShiftAmountTy(), Op2);
// If the operand is larger than the shift count type but the shift
// count type has enough bits to represent any shift value, truncate
// it now. This is a common case and it exposes the truncate to
// optimization early.
- else if (TLI.getShiftAmountTy().getSizeInBits() >=
+ else if (STy.getSizeInBits() >=
Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
TLI.getShiftAmountTy(), Op2);
// Otherwise we'll need to temporarily settle for some other
// convenient type; type legalization will make adjustments as
// needed.
- else if (TLI.getPointerTy().bitsLT(Op2.getValueType()))
+ else if (PTy.bitsLT(Op2.getValueType()))
Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
TLI.getPointerTy(), Op2);
- else if (TLI.getPointerTy().bitsGT(Op2.getValueType()))
+ else if (PTy.bitsGT(Op2.getValueType()))
Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
TLI.getPointerTy(), Op2);
}
@@ -2209,7 +2212,9 @@ void SelectionDAGLowering::visitICmp(User &I) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Opcode = getICmpCondCode(predicate);
- setValue(&I, DAG.getSetCC(getCurDebugLoc(),MVT::i1, Op1, Op2, Opcode));
+
+ EVT DestVT = TLI.getValueType(I.getType());
+ setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
}
void SelectionDAGLowering::visitFCmp(User &I) {
@@ -2221,38 +2226,12 @@ void SelectionDAGLowering::visitFCmp(User &I) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Condition = getFCmpCondCode(predicate);
- setValue(&I, DAG.getSetCC(getCurDebugLoc(), MVT::i1, Op1, Op2, Condition));
-}
-
-void SelectionDAGLowering::visitVICmp(User &I) {
- ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
- if (VICmpInst *IC = dyn_cast<VICmpInst>(&I))
- predicate = IC->getPredicate();
- else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
- predicate = ICmpInst::Predicate(IC->getPredicate());
- SDValue Op1 = getValue(I.getOperand(0));
- SDValue Op2 = getValue(I.getOperand(1));
- ISD::CondCode Opcode = getICmpCondCode(predicate);
- setValue(&I, DAG.getVSetCC(getCurDebugLoc(), Op1.getValueType(),
- Op1, Op2, Opcode));
-}
-
-void SelectionDAGLowering::visitVFCmp(User &I) {
- FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
- if (VFCmpInst *FC = dyn_cast<VFCmpInst>(&I))
- predicate = FC->getPredicate();
- else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
- predicate = FCmpInst::Predicate(FC->getPredicate());
- SDValue Op1 = getValue(I.getOperand(0));
- SDValue Op2 = getValue(I.getOperand(1));
- ISD::CondCode Condition = getFCmpCondCode(predicate);
- MVT DestVT = TLI.getValueType(I.getType());
-
- setValue(&I, DAG.getVSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
+ EVT DestVT = TLI.getValueType(I.getType());
+ setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
}
void SelectionDAGLowering::visitSelect(User &I) {
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I.getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues != 0) {
@@ -2277,7 +2256,7 @@ void SelectionDAGLowering::visitSelect(User &I) {
void SelectionDAGLowering::visitTrunc(User &I) {
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
}
@@ -2285,7 +2264,7 @@ void SelectionDAGLowering::visitZExt(User &I) {
// ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// ZExt also can't be a cast to bool for same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
}
@@ -2293,14 +2272,14 @@ void SelectionDAGLowering::visitSExt(User &I) {
// SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// SExt also can't be a cast to bool for same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitFPTrunc(User &I) {
// FPTrunc is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
DestVT, N, DAG.getIntPtrConstant(0)));
}
@@ -2308,35 +2287,35 @@ void SelectionDAGLowering::visitFPTrunc(User &I) {
void SelectionDAGLowering::visitFPExt(User &I){
  // FPExt is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitFPToUI(User &I) {
// FPToUI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitFPToSI(User &I) {
// FPToSI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitUIToFP(User &I) {
// UIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitSIToFP(User &I){
// SIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
}
@@ -2344,14 +2323,9 @@ void SelectionDAGLowering::visitPtrToInt(User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- MVT SrcVT = N.getValueType();
- MVT DestVT = TLI.getValueType(I.getType());
- SDValue Result;
- if (DestVT.bitsLT(SrcVT))
- Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
- else
- // Note: ZERO_EXTEND can handle cases where the sizes are equal too
- Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
+ EVT SrcVT = N.getValueType();
+ EVT DestVT = TLI.getValueType(I.getType());
+ SDValue Result = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
setValue(&I, Result);
}
@@ -2359,19 +2333,14 @@ void SelectionDAGLowering::visitIntToPtr(User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- MVT SrcVT = N.getValueType();
- MVT DestVT = TLI.getValueType(I.getType());
- if (DestVT.bitsLT(SrcVT))
- setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
- else
- // Note: ZERO_EXTEND can handle cases where the sizes are equal too
- setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
- DestVT, N));
+ EVT SrcVT = N.getValueType();
+ EVT DestVT = TLI.getValueType(I.getType());
+ setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
}
void SelectionDAGLowering::visitBitCast(User &I) {
SDValue N = getValue(I.getOperand(0));
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
// BitCast assures us that source and destination are the same size so this
// is either a BIT_CONVERT or a no-op.
@@ -2422,7 +2391,8 @@ void SelectionDAGLowering::visitShuffleVector(User &I) {
// Convert the ConstantVector mask operand into an array of ints, with -1
// representing undef values.
SmallVector<Constant*, 8> MaskElts;
- cast<Constant>(I.getOperand(2))->getVectorElements(MaskElts);
+ cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(),
+ MaskElts);
unsigned MaskNumElts = MaskElts.size();
for (unsigned i = 0; i != MaskNumElts; ++i) {
if (isa<UndefValue>(MaskElts[i]))
@@ -2431,8 +2401,8 @@ void SelectionDAGLowering::visitShuffleVector(User &I) {
Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
}
- MVT VT = TLI.getValueType(I.getType());
- MVT SrcVT = Src1.getValueType();
+ EVT VT = TLI.getValueType(I.getType());
+ EVT SrcVT = Src1.getValueType();
unsigned SrcNumElts = SrcVT.getVectorNumElements();
if (SrcNumElts == MaskNumElts) {
@@ -2531,7 +2501,7 @@ void SelectionDAGLowering::visitShuffleVector(User &I) {
}
}
- if (RangeUse[0] == 0 && RangeUse[0] == 0) {
+ if (RangeUse[0] == 0 && RangeUse[1] == 0) {
setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
return;
}
@@ -2566,8 +2536,8 @@ void SelectionDAGLowering::visitShuffleVector(User &I) {
  // We can't use either concat vectors or extract subvectors, so fall back to
  // replacing the shuffle: extract each result element from the source
  // vectors and rebuild the result with a build vector.
- MVT EltVT = VT.getVectorElementType();
- MVT PtrVT = TLI.getPointerTy();
+ EVT EltVT = VT.getVectorElementType();
+ EVT PtrVT = TLI.getPointerTy();
SmallVector<SDValue,8> Ops;
for (unsigned i = 0; i != MaskNumElts; ++i) {
if (Mask[i] < 0) {
@@ -2598,9 +2568,9 @@ void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
I.idx_begin(), I.idx_end());
- SmallVector<MVT, 4> AggValueVTs;
+ SmallVector<EVT, 4> AggValueVTs;
ComputeValueVTs(TLI, AggTy, AggValueVTs);
- SmallVector<MVT, 4> ValValueVTs;
+ SmallVector<EVT, 4> ValValueVTs;
ComputeValueVTs(TLI, ValTy, ValValueVTs);
unsigned NumAggValues = AggValueVTs.size();
@@ -2637,7 +2607,7 @@ void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
I.idx_begin(), I.idx_end());
- SmallVector<MVT, 4> ValValueVTs;
+ SmallVector<EVT, 4> ValValueVTs;
ComputeValueVTs(TLI, ValTy, ValValueVTs);
unsigned NumValValues = ValValueVTs.size();
@@ -2682,7 +2652,8 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
uint64_t Offs =
TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
- unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
+ EVT PTy = TLI.getPointerTy();
+ unsigned PtrBits = PTy.getSizeInBits();
if (PtrBits < 64) {
OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
TLI.getPointerTy(),
@@ -2700,12 +2671,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
// If the index is smaller or larger than intptr_t, truncate or extend
// it.
- if (IdxN.getValueType().bitsLT(N.getValueType()))
- IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
- N.getValueType(), IdxN);
- else if (IdxN.getValueType().bitsGT(N.getValueType()))
- IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
- N.getValueType(), IdxN);
+ IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
// If this is a multiply by a power of two, turn it into a shl
// immediately. This is a very common case.
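
The comment above refers to the common case of a GEP index scaled by a power-of-two element size, which can be emitted as a shift instead of a multiply. A standalone illustration of that strength reduction (names are made up; this is not the DAG lowering code):

#include <cassert>
#include <cstdint>

static bool isPowerOf2(uint64_t V) { return V && (V & (V - 1)) == 0; }

static uint64_t scaleIndex(uint64_t Index, uint64_t ElemSize) {
  if (isPowerOf2(ElemSize)) {
    unsigned Shift = 0;
    while ((uint64_t(1) << Shift) != ElemSize)
      ++Shift;
    return Index << Shift;        // shl: cheap and very common for GEPs
  }
  return Index * ElemSize;        // otherwise a real multiply is needed
}

int main() {
  assert(scaleIndex(5, 8) == 40);   // 8-byte elements: Index << 3
  assert(scaleIndex(5, 12) == 60);  // non-power-of-two element size
  return 0;
}
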
@@ -2749,13 +2715,8 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
- MVT IntPtr = TLI.getPointerTy();
- if (IntPtr.bitsLT(AllocSize.getValueType()))
- AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
- IntPtr, AllocSize);
- else if (IntPtr.bitsGT(AllocSize.getValueType()))
- AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
- IntPtr, AllocSize);
+ EVT IntPtr = TLI.getPointerTy();
+ AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
// Handle alignment. If the requested alignment is less than or equal to
// the stack alignment, ignore it. If the size is greater than or equal to
@@ -2784,7 +2745,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
// Inform the Frame Information that we have just allocated a variable-sized
// object.
- CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
+ FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
}
void SelectionDAGLowering::visitLoad(LoadInst &I) {
@@ -2795,7 +2756,7 @@ void SelectionDAGLowering::visitLoad(LoadInst &I) {
bool isVolatile = I.isVolatile();
unsigned Alignment = I.getAlignment();
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
@@ -2818,14 +2779,13 @@ void SelectionDAGLowering::visitLoad(LoadInst &I) {
SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(NumValues);
- MVT PtrVT = Ptr.getValueType();
+ EVT PtrVT = Ptr.getValueType();
for (unsigned i = 0; i != NumValues; ++i) {
SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
- DAG.getNode(ISD::ADD, getCurDebugLoc(),
- PtrVT, Ptr,
- DAG.getConstant(Offsets[i], PtrVT)),
- SV, Offsets[i],
- isVolatile, Alignment);
+ DAG.getNode(ISD::ADD, getCurDebugLoc(),
+ PtrVT, Ptr,
+ DAG.getConstant(Offsets[i], PtrVT)),
+ SV, Offsets[i], isVolatile, Alignment);
Values[i] = L;
Chains[i] = L.getValue(1);
}
@@ -2850,7 +2810,7 @@ void SelectionDAGLowering::visitStore(StoreInst &I) {
Value *SrcV = I.getOperand(0);
Value *PtrV = I.getOperand(1);
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
@@ -2865,7 +2825,7 @@ void SelectionDAGLowering::visitStore(StoreInst &I) {
SDValue Root = getRoot();
SmallVector<SDValue, 4> Chains(NumValues);
- MVT PtrVT = Ptr.getValueType();
+ EVT PtrVT = Ptr.getValueType();
bool isVolatile = I.isVolatile();
unsigned Alignment = I.getAlignment();
for (unsigned i = 0; i != NumValues; ++i)
@@ -2874,8 +2834,7 @@ void SelectionDAGLowering::visitStore(StoreInst &I) {
DAG.getNode(ISD::ADD, getCurDebugLoc(),
PtrVT, Ptr,
DAG.getConstant(Offsets[i], PtrVT)),
- PtrV, Offsets[i],
- isVolatile, Alignment);
+ PtrV, Offsets[i], isVolatile, Alignment);
DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
MVT::Other, &Chains[0], NumValues));
@@ -2915,24 +2874,18 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
Ops.push_back(Op);
}
- std::vector<MVT> VTArray;
- if (I.getType() != Type::VoidTy) {
- MVT VT = TLI.getValueType(I.getType());
- if (VT.isVector()) {
- const VectorType *DestTy = cast<VectorType>(I.getType());
- MVT EltVT = TLI.getValueType(DestTy->getElementType());
-
- VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
- assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
- }
-
- assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
- VTArray.push_back(VT);
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(TLI, I.getType(), ValueVTs);
+#ifndef NDEBUG
+ for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
+ assert(TLI.isTypeLegal(ValueVTs[Val]) &&
+ "Intrinsic uses a non-legal type?");
}
+#endif // NDEBUG
if (HasChain)
- VTArray.push_back(MVT::Other);
+ ValueVTs.push_back(MVT::Other);
- SDVTList VTs = DAG.getVTList(&VTArray[0], VTArray.size());
+ SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
// Create the node.
SDValue Result;
@@ -2947,7 +2900,7 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
else if (!HasChain)
Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
VTs, &Ops[0], Ops.size());
- else if (I.getType() != Type::VoidTy)
+ else if (I.getType() != Type::getVoidTy(*DAG.getContext()))
Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
VTs, &Ops[0], Ops.size());
else
@@ -2961,9 +2914,9 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
else
DAG.setRoot(Chain);
}
- if (I.getType() != Type::VoidTy) {
+ if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
- MVT VT = TLI.getValueType(PTy);
+ EVT VT = TLI.getValueType(PTy);
Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
}
setValue(&I, Result);
@@ -3890,7 +3843,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
if (isValidDebugInfoIntrinsic(RSI, OptLevel) && DW
&& DW->ShouldEmitDwarfDebug()) {
unsigned LabelID =
- DW->RecordRegionStart(cast<GlobalVariable>(RSI.getContext()));
+ DW->RecordRegionStart(RSI.getContext());
DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
getRoot(), LabelID));
}
@@ -3905,7 +3858,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return 0;
MachineFunction &MF = DAG.getMachineFunction();
- DISubprogram Subprogram(cast<GlobalVariable>(REI.getContext()));
+ DISubprogram Subprogram(REI.getContext());
if (isInlinedFnEnd(REI, MF.getFunction())) {
// This is end of inlined function. Debugging information for inlined
@@ -3924,7 +3877,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
unsigned LabelID =
- DW->RecordRegionEnd(cast<GlobalVariable>(REI.getContext()));
+ DW->RecordRegionEnd(REI.getContext());
DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
getRoot(), LabelID));
return 0;
@@ -3932,8 +3885,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::dbg_func_start: {
DwarfWriter *DW = DAG.getDwarfWriter();
DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
- if (!isValidDebugInfoIntrinsic(FSI, CodeGenOpt::None) || !DW
- || !DW->ShouldEmitDwarfDebug())
+ if (!isValidDebugInfoIntrinsic(FSI, CodeGenOpt::None))
return 0;
MachineFunction &MF = DAG.getMachineFunction();
@@ -3954,9 +3906,11 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
// Record the source line.
setCurDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
+ if (!DW || !DW->ShouldEmitDwarfDebug())
+ return 0;
DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
- DISubprogram SP(cast<GlobalVariable>(FSI.getSubprogram()));
- DICompileUnit CU(PrevLocTpl.CompileUnit);
+ DISubprogram SP(FSI.getSubprogram());
+ DICompileUnit CU(PrevLocTpl.Scope);
unsigned LabelID = DW->RecordInlinedFnStart(SP, CU,
PrevLocTpl.Line,
PrevLocTpl.Col);
@@ -3967,23 +3921,44 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
// This is a beginning of a new function.
MF.setDefaultDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
-
+
+ if (!DW || !DW->ShouldEmitDwarfDebug())
+ return 0;
// llvm.dbg.func_start also defines beginning of function scope.
- DW->RecordRegionStart(cast<GlobalVariable>(FSI.getSubprogram()));
+ DW->RecordRegionStart(FSI.getSubprogram());
return 0;
}
case Intrinsic::dbg_declare: {
if (OptLevel != CodeGenOpt::None)
// FIXME: Variable debug info is not supported here.
return 0;
-
+ DwarfWriter *DW = DAG.getDwarfWriter();
+ if (!DW)
+ return 0;
DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
return 0;
- Value *Variable = DI.getVariable();
- DAG.setRoot(DAG.getNode(ISD::DECLARE, dl, MVT::Other, getRoot(),
- getValue(DI.getAddress()), getValue(Variable)));
+ MDNode *Variable = DI.getVariable();
+ Value *Address = DI.getAddress();
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
+ Address = BCI->getOperand(0);
+ AllocaInst *AI = dyn_cast<AllocaInst>(Address);
+ // Don't handle byval struct arguments or VLAs, for example.
+ if (!AI)
+ return 0;
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI == FuncInfo.StaticAllocaMap.end())
+ return 0; // VLAs.
+ int FI = SI->second;
+#ifdef ATTACH_DEBUG_INFO_TO_AN_INSN
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
+ if (MMI)
+ MMI->setVariableDbgInfo(Variable, FI);
+#else
+ DW->RecordVariable(Variable, FI);
+#endif
return 0;
}
case Intrinsic::eh_exception: {
@@ -3998,54 +3973,45 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return 0;
}
- case Intrinsic::eh_selector_i32:
- case Intrinsic::eh_selector_i64: {
+ case Intrinsic::eh_selector: {
MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
- MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
- MVT::i32 : MVT::i64);
- if (MMI) {
- if (CurMBB->isLandingPad())
- AddCatchInfo(I, MMI, CurMBB);
- else {
+ if (CurMBB->isLandingPad())
+ AddCatchInfo(I, MMI, CurMBB);
+ else {
#ifndef NDEBUG
- FuncInfo.CatchInfoLost.insert(&I);
+ FuncInfo.CatchInfoLost.insert(&I);
#endif
- // FIXME: Mark exception selector register as live in. Hack for PR1508.
- unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) CurMBB->addLiveIn(Reg);
- }
-
- // Insert the EHSELECTION instruction.
- SDVTList VTs = DAG.getVTList(VT, MVT::Other);
- SDValue Ops[2];
- Ops[0] = getValue(I.getOperand(1));
- Ops[1] = getRoot();
- SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
- setValue(&I, Op);
- DAG.setRoot(Op.getValue(1));
- } else {
- setValue(&I, DAG.getConstant(0, VT));
+ // FIXME: Mark exception selector register as live in. Hack for PR1508.
+ unsigned Reg = TLI.getExceptionSelectorRegister();
+ if (Reg) CurMBB->addLiveIn(Reg);
}
+ // Insert the EHSELECTION instruction.
+ SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
+ SDValue Ops[2];
+ Ops[0] = getValue(I.getOperand(1));
+ Ops[1] = getRoot();
+ SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
+
+ DAG.setRoot(Op.getValue(1));
+
+ setValue(&I, DAG.getSExtOrTrunc(Op, dl, MVT::i32));
return 0;
}
- case Intrinsic::eh_typeid_for_i32:
- case Intrinsic::eh_typeid_for_i64: {
+ case Intrinsic::eh_typeid_for: {
MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
- MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
- MVT::i32 : MVT::i64);
if (MMI) {
// Find the type id for the given typeinfo.
GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
unsigned TypeID = MMI->getTypeIDFor(GV);
- setValue(&I, DAG.getConstant(TypeID, VT));
+ setValue(&I, DAG.getConstant(TypeID, MVT::i32));
} else {
// Return something different to eh_selector.
- setValue(&I, DAG.getConstant(1, VT));
+ setValue(&I, DAG.getConstant(1, MVT::i32));
}
return 0;
@@ -4073,14 +4039,9 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return 0;
case Intrinsic::eh_dwarf_cfa: {
- MVT VT = getValue(I.getOperand(1)).getValueType();
- SDValue CfaArg;
- if (VT.bitsGT(TLI.getPointerTy()))
- CfaArg = DAG.getNode(ISD::TRUNCATE, dl,
- TLI.getPointerTy(), getValue(I.getOperand(1)));
- else
- CfaArg = DAG.getNode(ISD::SIGN_EXTEND, dl,
- TLI.getPointerTy(), getValue(I.getOperand(1)));
+ EVT VT = getValue(I.getOperand(1)).getValueType();
+ SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
+ TLI.getPointerTy());
SDValue Offset = DAG.getNode(ISD::ADD, dl,
TLI.getPointerTy(),
@@ -4096,7 +4057,6 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
Offset));
return 0;
}
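
A hedged sketch, not taken from this patch: the TRUNCATE/SIGN_EXTEND branch deleted above is exactly what the DAG.getSExtOrTrunc helper now folds into one call (getZExtOrTrunc, used for the malloc size later in this file, is the zero-extending counterpart). Conceptually, for a value Val and destination type DestVT:

    // Roughly what DAG.getSExtOrTrunc(Val, dl, DestVT) produces; when the
    // widths already match, getNode folds the no-op extension away.
    SDValue Adjusted = Val.getValueType().bitsGT(DestVT)
                           ? DAG.getNode(ISD::TRUNCATE, dl, DestVT, Val)
                           : DAG.getNode(ISD::SIGN_EXTEND, dl, DestVT, Val);
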
-
case Intrinsic::convertff:
case Intrinsic::convertfsi:
case Intrinsic::convertfui:
@@ -4118,7 +4078,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::convertus: Code = ISD::CVT_US; break;
case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
}
- MVT DestVT = TLI.getValueType(I.getType());
+ EVT DestVT = TLI.getValueType(I.getType());
Value* Op1 = I.getOperand(1);
setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
DAG.getValueType(DestVT),
@@ -4182,16 +4142,6 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
DAG.setRoot(Tmp.getValue(1));
return 0;
}
- case Intrinsic::part_select: {
- // Currently not implemented: just abort
- assert(0 && "part_select intrinsic not implemented");
- abort();
- }
- case Intrinsic::part_set: {
- // Currently not implemented: just abort
- assert(0 && "part_set intrinsic not implemented");
- abort();
- }
case Intrinsic::bswap:
setValue(&I, DAG.getNode(ISD::BSWAP, dl,
getValue(I.getOperand(1)).getValueType(),
@@ -4199,21 +4149,21 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return 0;
case Intrinsic::cttz: {
SDValue Arg = getValue(I.getOperand(1));
- MVT Ty = Arg.getValueType();
+ EVT Ty = Arg.getValueType();
SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
setValue(&I, result);
return 0;
}
case Intrinsic::ctlz: {
SDValue Arg = getValue(I.getOperand(1));
- MVT Ty = Arg.getValueType();
+ EVT Ty = Arg.getValueType();
SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
setValue(&I, result);
return 0;
}
case Intrinsic::ctpop: {
SDValue Arg = getValue(I.getOperand(1));
- MVT Ty = Arg.getValueType();
+ EVT Ty = Arg.getValueType();
SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
setValue(&I, result);
return 0;
@@ -4235,7 +4185,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
// Emit code into the DAG to store the stack guard onto the stack.
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- MVT PtrTy = TLI.getPointerTy();
+ EVT PtrTy = TLI.getPointerTy();
SDValue Src = getValue(I.getOperand(1)); // The guard's value.
AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
@@ -4289,7 +4239,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::gcread:
case Intrinsic::gcwrite:
- assert(0 && "GC failed to lower gcread/gcwrite intrinsics!");
+ llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
return 0;
case Intrinsic::flt_rounds: {
@@ -4373,9 +4323,76 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
}
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+/// For target-dependent requirements, a target should override
+/// TargetLowering::IsEligibleForTailCallOptimization.
+///
+static bool
+isInTailCallPosition(const Instruction *I, Attributes RetAttr,
+ const TargetLowering &TLI) {
+ const BasicBlock *ExitBB = I->getParent();
+ const TerminatorInst *Term = ExitBB->getTerminator();
+ const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
+ const Function *F = ExitBB->getParent();
+
+ // The block must end in a return statement or an unreachable.
+ if (!Ret && !isa<UnreachableInst>(Term)) return false;
+
+ // If I will have a chain, make sure no other instruction that will have a
+ // chain interposes between I and the return.
+ if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
+ !I->isSafeToSpeculativelyExecute())
+ for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
+ --BBI) {
+ if (&*BBI == I)
+ break;
+ if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
+ !BBI->isSafeToSpeculativelyExecute())
+ return false;
+ }
+
+ // If the block ends with a void return or unreachable, it doesn't matter
+ // what the call's return type is.
+ if (!Ret || Ret->getNumOperands() == 0) return true;
+
+ // Conservatively require the attributes of the call to match those of
+ // the return.
+ if (F->getAttributes().getRetAttributes() != RetAttr)
+ return false;
+
+ // Otherwise, make sure the unmodified return value of I is the return value.
+ for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
+ U = dyn_cast<Instruction>(U->getOperand(0))) {
+ if (!U)
+ return false;
+ if (!U->hasOneUse())
+ return false;
+ if (U == I)
+ break;
+ // Check for a truly no-op truncate.
+ if (isa<TruncInst>(U) &&
+ TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
+ continue;
+ // Check for a truly no-op bitcast.
+ if (isa<BitCastInst>(U) &&
+ (U->getOperand(0)->getType() == U->getType() ||
+ (isa<PointerType>(U->getOperand(0)->getType()) &&
+ isa<PointerType>(U->getType()))))
+ continue;
+ // Otherwise it's not a true no-op.
+ return false;
+ }
+
+ return true;
+}
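
The helper is consumed further down in this patch; a condensed, hedged sketch of the intended usage pattern (it mirrors the LowerCallTo and visitMalloc hunks below, with CS standing for the call site in question):

    // Only request a tail call when the target-independent position test
    // passes; the target still gets the final say inside TLI.LowerCallTo.
    bool isTailCall = PerformTailCallOpt &&
                      isInTailCallPosition(CS.getInstruction(),
                                           CS.getAttributes().getRetAttributes(),
                                           TLI);
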
void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
- bool IsTailCall,
+ bool isTailCall,
MachineBasicBlock *LandingPad) {
const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
@@ -4385,8 +4402,9 @@ void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Args.reserve(CS.arg_size());
+ unsigned j = 1;
for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
- i != e; ++i) {
+ i != e; ++i, ++j) {
SDValue ArgNode = getValue(*i);
Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
@@ -4405,6 +4423,7 @@ void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
// Insert a label before the invoke call to mark the try range. This can be
// used to detect deletion of the invoke via the MachineModuleInfo.
BeginLabel = MMI->NextLabelID();
+
// Both PendingLoads and PendingExports must be flushed here;
// this call might not return.
(void)getRoot();
@@ -4412,17 +4431,35 @@ void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
getControlRoot(), BeginLabel));
}
+ // Check if target-independent constraints permit a tail call here.
+ // Target-dependent constraints are checked within TLI.LowerCallTo.
+ if (isTailCall &&
+ !isInTailCallPosition(CS.getInstruction(),
+ CS.getAttributes().getRetAttributes(),
+ TLI))
+ isTailCall = false;
+
std::pair<SDValue,SDValue> Result =
TLI.LowerCallTo(getRoot(), CS.getType(),
CS.paramHasAttr(0, Attribute::SExt),
CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
CS.getCallingConv(),
- IsTailCall && PerformTailCallOpt,
+ isTailCall,
+ !CS.getInstruction()->use_empty(),
Callee, Args, DAG, getCurDebugLoc());
- if (CS.getType() != Type::VoidTy)
+ assert((isTailCall || Result.second.getNode()) &&
+ "Non-null chain expected with non-tail call!");
+ assert((Result.second.getNode() || !Result.first.getNode()) &&
+ "Null value expected with tail call!");
+ if (Result.first.getNode())
setValue(CS.getInstruction(), Result.first);
- DAG.setRoot(Result.second);
+ // As a special case, a null chain means that a tail call has
+ // been emitted and the DAG root is already updated.
+ if (Result.second.getNode())
+ DAG.setRoot(Result.second);
+ else
+ HasTailCall = true;
if (LandingPad && MMI) {
// Insert a label at the end of the invoke call to mark the try range. This
@@ -4458,12 +4495,9 @@ void SelectionDAGLowering::visitCall(CallInst &I) {
// Check for well-known libc/libm calls. If the function is internal, it
// can't be a library call.
- unsigned NameLen = F->getNameLen();
- if (!F->hasLocalLinkage() && NameLen) {
- const char *NameStr = F->getNameStart();
- if (NameStr[0] == 'c' &&
- ((NameLen == 8 && !strcmp(NameStr, "copysign")) ||
- (NameLen == 9 && !strcmp(NameStr, "copysignf")))) {
+ if (!F->hasLocalLinkage() && F->hasName()) {
+ StringRef Name = F->getName();
+ if (Name == "copysign" || Name == "copysignf") {
if (I.getNumOperands() == 3 && // Basic sanity checks.
I.getOperand(1)->getType()->isFloatingPoint() &&
I.getType() == I.getOperand(1)->getType() &&
@@ -4474,10 +4508,7 @@ void SelectionDAGLowering::visitCall(CallInst &I) {
LHS.getValueType(), LHS, RHS));
return;
}
- } else if (NameStr[0] == 'f' &&
- ((NameLen == 4 && !strcmp(NameStr, "fabs")) ||
- (NameLen == 5 && !strcmp(NameStr, "fabsf")) ||
- (NameLen == 5 && !strcmp(NameStr, "fabsl")))) {
+ } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
if (I.getNumOperands() == 2 && // Basic sanity checks.
I.getOperand(1)->getType()->isFloatingPoint() &&
I.getType() == I.getOperand(1)->getType()) {
@@ -4486,30 +4517,36 @@ void SelectionDAGLowering::visitCall(CallInst &I) {
Tmp.getValueType(), Tmp));
return;
}
- } else if (NameStr[0] == 's' &&
- ((NameLen == 3 && !strcmp(NameStr, "sin")) ||
- (NameLen == 4 && !strcmp(NameStr, "sinf")) ||
- (NameLen == 4 && !strcmp(NameStr, "sinl")))) {
+ } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
if (I.getNumOperands() == 2 && // Basic sanity checks.
I.getOperand(1)->getType()->isFloatingPoint() &&
- I.getType() == I.getOperand(1)->getType()) {
+ I.getType() == I.getOperand(1)->getType() &&
+ I.onlyReadsMemory()) {
SDValue Tmp = getValue(I.getOperand(1));
setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
- } else if (NameStr[0] == 'c' &&
- ((NameLen == 3 && !strcmp(NameStr, "cos")) ||
- (NameLen == 4 && !strcmp(NameStr, "cosf")) ||
- (NameLen == 4 && !strcmp(NameStr, "cosl")))) {
+ } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
if (I.getNumOperands() == 2 && // Basic sanity checks.
I.getOperand(1)->getType()->isFloatingPoint() &&
- I.getType() == I.getOperand(1)->getType()) {
+ I.getType() == I.getOperand(1)->getType() &&
+ I.onlyReadsMemory()) {
SDValue Tmp = getValue(I.getOperand(1));
setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
+ } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
+ if (I.getNumOperands() == 2 && // Basic sanity checks.
+ I.getOperand(1)->getType()->isFloatingPoint() &&
+ I.getType() == I.getOperand(1)->getType() &&
+ I.onlyReadsMemory()) {
+ SDValue Tmp = getValue(I.getOperand(1));
+ setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
+ Tmp.getValueType(), Tmp));
+ return;
+ }
}
}
} else if (isa<InlineAsm>(I.getOperand(0))) {
@@ -4523,7 +4560,12 @@ void SelectionDAGLowering::visitCall(CallInst &I) {
else
Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
- LowerCallTo(&I, Callee, I.isTailCall());
+ // Check if we can potentially perform a tail call. More detailed
+ // checking is done within LowerCallTo, after more information
+ // about the call is known.
+ bool isTailCall = PerformTailCallOpt && I.isTailCall();
+
+ LowerCallTo(&I, Callee, isTailCall);
}
@@ -4539,9 +4581,9 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
SmallVector<SDValue, 8> Parts;
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
// Copy the legal parts from the registers.
- MVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = TLI->getNumRegisters(ValueVT);
- MVT RegisterVT = RegVTs[Value];
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
+ EVT RegisterVT = RegVTs[Value];
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
@@ -4570,7 +4612,7 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
// FIXME: We capture more information than the dag can represent. For
// now, just use the tightest assertzext/assertsext possible.
bool isSExt = true;
- MVT FromVT(MVT::Other);
+ EVT FromVT(MVT::Other);
if (NumSignBits == RegSize)
isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
else if (NumZeroBits >= RegSize-1)
@@ -4620,9 +4662,9 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
unsigned NumRegs = Regs.size();
SmallVector<SDValue, 8> Parts(NumRegs);
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
- MVT ValueVT = ValueVTs[Value];
- unsigned NumParts = TLI->getNumRegisters(ValueVT);
- MVT RegisterVT = RegVTs[Value];
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
+ EVT RegisterVT = RegVTs[Value];
getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
&Parts[Part], NumParts, RegisterVT);
@@ -4665,15 +4707,15 @@ void RegsForValue::AddInlineAsmOperands(unsigned Code,
bool HasMatching,unsigned MatchingIdx,
SelectionDAG &DAG,
std::vector<SDValue> &Ops) const {
- MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
+ EVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
unsigned Flag = Code | (Regs.size() << 3);
if (HasMatching)
Flag |= 0x80000000 | (MatchingIdx << 16);
Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
- unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
- MVT RegisterVT = RegVTs[Value];
+ unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
+ EVT RegisterVT = RegVTs[Value];
for (unsigned i = 0; i != NumRegs; ++i) {
assert(Reg < Regs.size() && "Mismatch in # registers expected");
Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
@@ -4688,11 +4730,11 @@ static const TargetRegisterClass *
isAllocatableRegister(unsigned Reg, MachineFunction &MF,
const TargetLowering &TLI,
const TargetRegisterInfo *TRI) {
- MVT FoundVT = MVT::Other;
+ EVT FoundVT = MVT::Other;
const TargetRegisterClass *FoundRC = 0;
for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
E = TRI->regclass_end(); RCI != E; ++RCI) {
- MVT ThisVT = MVT::Other;
+ EVT ThisVT = MVT::Other;
const TargetRegisterClass *RC = *RCI;
    // If none of the value types for this register class are valid, we
@@ -4765,10 +4807,11 @@ public:
}
}
- /// getCallOperandValMVT - Return the MVT of the Value* that this operand
+ /// getCallOperandValEVT - Return the EVT of the Value* that this operand
/// corresponds to. If there is no Value* for this operand, it returns
/// MVT::Other.
- MVT getCallOperandValMVT(const TargetLowering &TLI,
+ EVT getCallOperandValEVT(LLVMContext &Context,
+ const TargetLowering &TLI,
const TargetData *TD) const {
if (CallOperandVal == 0) return MVT::Other;
@@ -4794,7 +4837,7 @@ public:
case 32:
case 64:
case 128:
- OpTy = IntegerType::get(BitSize);
+ OpTy = IntegerType::get(Context, BitSize);
break;
}
}
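
A hedged aside on the API shift visible in this hunk: with types now owned by an LLVMContext, the factory calls take the context explicitly instead of consulting a global registry. A minimal sketch, assuming a context Ctx is in scope:

    const Type *I32   = IntegerType::get(Ctx, 32);   // previously IntegerType::get(32)
    const Type *VoidT = Type::getVoidTy(Ctx);         // previously Type::VoidTy
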
@@ -4830,6 +4873,8 @@ void SelectionDAGLowering::
GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
std::set<unsigned> &OutputRegs,
std::set<unsigned> &InputRegs) {
+ LLVMContext &Context = FuncInfo.Fn->getContext();
+
// Compute whether this value requires an input register, an output register,
// or both.
bool isOutReg = false;
@@ -4869,10 +4914,10 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
// value disagrees with the register class we plan to stick this in.
if (OpInfo.Type == InlineAsm::isInput &&
PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
- // Try to convert to the first MVT that the reg class contains. If the
+ // Try to convert to the first EVT that the reg class contains. If the
// types are identical size, use a bitcast to convert (e.g. two differing
// vector types).
- MVT RegVT = *PhysReg.second->vt_begin();
+ EVT RegVT = *PhysReg.second->vt_begin();
if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
RegVT, OpInfo.CallOperand);
@@ -4882,18 +4927,19 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
// bitcast to the corresponding integer type. This turns an f64 value
// into i64, which can be passed with two i32 values on a 32-bit
// machine.
- RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
+ RegVT = EVT::getIntegerVT(Context,
+ OpInfo.ConstraintVT.getSizeInBits());
OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
}
}
- NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT);
+ NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
}
- MVT RegVT;
- MVT ValueVT = OpInfo.ConstraintVT;
+ EVT RegVT;
+ EVT ValueVT = OpInfo.ConstraintVT;
// If this is a constraint for a specific physical register, like {r17},
// assign it now.
@@ -5047,7 +5093,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
- MVT OpVT = MVT::Other;
+ EVT OpVT = MVT::Other;
// Compute the value type for each operand.
switch (OpInfo.Type) {
@@ -5060,7 +5106,8 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
// The return value of the call is this value. As such, there is no
// corresponding argument.
- assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
+ assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
+ "Bad inline asm!");
if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpVT = TLI.getValueType(STy->getElementType(ResNo));
} else {
@@ -5080,13 +5127,16 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
// If this is an input or an indirect output, process the call argument.
// BasicBlocks are labels, currently appearing only in asm's.
if (OpInfo.CallOperandVal) {
+ // Strip bitcasts, if any. This mostly comes up for functions.
+ OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
+
if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
} else {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
- OpVT = OpInfo.getCallOperandValMVT(TLI, TD);
+ OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
}
OpInfo.ConstraintVT = OpVT;
@@ -5108,9 +5158,9 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
Input.ConstraintVT.isInteger()) ||
(OpInfo.ConstraintVT.getSizeInBits() !=
Input.ConstraintVT.getSizeInBits())) {
- cerr << "llvm: error: Unsupported asm: input constraint with a "
- << "matching output constraint of incompatible type!\n";
- exit(1);
+ llvm_report_error("Unsupported asm: input constraint"
+ " with a matching output constraint of incompatible"
+ " type!");
}
Input.ConstraintVT = OpInfo.ConstraintVT;
}
@@ -5213,9 +5263,8 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
// Copy the output from the appropriate register. Find a register that
// we can use.
if (OpInfo.AssignedRegs.Regs.empty()) {
- cerr << "llvm: error: Couldn't allocate output reg for constraint '"
- << OpInfo.ConstraintCode << "'!\n";
- exit(1);
+ llvm_report_error("Couldn't allocate output reg for"
+ " constraint '" + OpInfo.ConstraintCode + "'!");
}
// If this is an indirect operand, store through the pointer after the
@@ -5225,7 +5274,8 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
OpInfo.CallOperandVal));
} else {
// This is the result value of the call.
- assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
+ assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
+ "Bad inline asm!");
// Concatenate this output onto the outputs list.
RetValRegs.append(OpInfo.AssignedRegs);
}
@@ -5268,15 +5318,13 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
|| (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
// Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
if (OpInfo.isIndirect) {
- cerr << "llvm: error: "
- "Don't know how to handle tied indirect "
- "register inputs yet!\n";
- exit(1);
+ llvm_report_error("Don't know how to handle tied indirect "
+ "register inputs yet!");
}
RegsForValue MatchedRegs;
MatchedRegs.TLI = &TLI;
MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
- MVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
+ EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
MatchedRegs.RegVTs.push_back(RegVT);
MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
@@ -5313,9 +5361,8 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
hasMemory, Ops, DAG);
if (Ops.empty()) {
- cerr << "llvm: error: Invalid operand for inline asm constraint '"
- << OpInfo.ConstraintCode << "'!\n";
- exit(1);
+ llvm_report_error("Invalid operand for inline asm"
+ " constraint '" + OpInfo.ConstraintCode + "'!");
}
// Add information to the INLINEASM node to know about this input.
@@ -5345,9 +5392,8 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
// Copy the input into the appropriate registers.
if (OpInfo.AssignedRegs.Regs.empty()) {
- cerr << "llvm: error: Couldn't allocate output reg for constraint '"
- << OpInfo.ConstraintCode << "'!\n";
- exit(1);
+ llvm_report_error("Couldn't allocate input reg for"
+ " constraint '"+ OpInfo.ConstraintCode +"'!");
}
OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
@@ -5385,7 +5431,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
// FIXME: Why don't we do this for inline asms with MRVs?
if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
- MVT ResultType = TLI.getValueType(CS.getType());
+ EVT ResultType = TLI.getValueType(CS.getType());
// If any of the results of the inline asm is a vector, it may have the
// wrong width/num elts. This can happen for register classes that can
@@ -5449,45 +5495,56 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
// multiply on 64-bit targets.
// FIXME: Malloc inst should go away: PR715.
uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
- if (ElementSize != 1)
+ if (ElementSize != 1) {
+ // Src is always 32-bits, make sure the constant fits.
+ assert(Src.getValueType() == MVT::i32);
+ ElementSize = (uint32_t)ElementSize;
Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
Src, DAG.getConstant(ElementSize, Src.getValueType()));
+ }
- MVT IntPtr = TLI.getPointerTy();
+ EVT IntPtr = TLI.getPointerTy();
- if (IntPtr.bitsLT(Src.getValueType()))
- Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
- else if (IntPtr.bitsGT(Src.getValueType()))
- Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);
+ Src = DAG.getZExtOrTrunc(Src, getCurDebugLoc(), IntPtr);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Src;
- Entry.Ty = TLI.getTargetData()->getIntPtrType();
+ Entry.Ty = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
Args.push_back(Entry);
+ bool isTailCall = PerformTailCallOpt &&
+ isInTailCallPosition(&I, Attribute::None, TLI);
std::pair<SDValue,SDValue> Result =
TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
- 0, CallingConv::C, PerformTailCallOpt,
+ 0, CallingConv::C, isTailCall,
+ /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("malloc", IntPtr),
Args, DAG, getCurDebugLoc());
- setValue(&I, Result.first); // Pointers always fit in registers
- DAG.setRoot(Result.second);
+ if (Result.first.getNode())
+ setValue(&I, Result.first); // Pointers always fit in registers
+ if (Result.second.getNode())
+ DAG.setRoot(Result.second);
}
void SelectionDAGLowering::visitFree(FreeInst &I) {
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = getValue(I.getOperand(0));
- Entry.Ty = TLI.getTargetData()->getIntPtrType();
+ Entry.Ty = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
Args.push_back(Entry);
- MVT IntPtr = TLI.getPointerTy();
+ EVT IntPtr = TLI.getPointerTy();
+ bool isTailCall = PerformTailCallOpt &&
+ isInTailCallPosition(&I, Attribute::None, TLI);
std::pair<SDValue,SDValue> Result =
- TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
- 0, CallingConv::C, PerformTailCallOpt,
+ TLI.LowerCallTo(getRoot(), Type::getVoidTy(*DAG.getContext()),
+ false, false, false, false,
+ 0, CallingConv::C, isTailCall,
+ /*isReturnValueUsed=*/true,
DAG.getExternalSymbol("free", IntPtr), Args, DAG,
getCurDebugLoc());
- DAG.setRoot(Result.second);
+ if (Result.second.getNode())
+ DAG.setRoot(Result.second);
}
void SelectionDAGLowering::visitVAStart(CallInst &I) {
@@ -5521,161 +5578,31 @@ void SelectionDAGLowering::visitVACopy(CallInst &I) {
DAG.getSrcValue(I.getOperand(2))));
}
-/// TargetLowering::LowerArguments - This is the default LowerArguments
-/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
-/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
-/// integrated into SDISel.
-void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &ArgValues,
- DebugLoc dl) {
- // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
- SmallVector<SDValue, 3+16> Ops;
- Ops.push_back(DAG.getRoot());
- Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
- Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
-
- // Add one result value for each formal argument.
- SmallVector<MVT, 16> RetVals;
- unsigned j = 1;
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
- I != E; ++I, ++j) {
- SmallVector<MVT, 4> ValueVTs;
- ComputeValueVTs(*this, I->getType(), ValueVTs);
- for (unsigned Value = 0, NumValues = ValueVTs.size();
- Value != NumValues; ++Value) {
- MVT VT = ValueVTs[Value];
- const Type *ArgTy = VT.getTypeForMVT();
- ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment =
- getTargetData()->getABITypeAlignment(ArgTy);
-
- if (F.paramHasAttr(j, Attribute::ZExt))
- Flags.setZExt();
- if (F.paramHasAttr(j, Attribute::SExt))
- Flags.setSExt();
- if (F.paramHasAttr(j, Attribute::InReg))
- Flags.setInReg();
- if (F.paramHasAttr(j, Attribute::StructRet))
- Flags.setSRet();
- if (F.paramHasAttr(j, Attribute::ByVal)) {
- Flags.setByVal();
- const PointerType *Ty = cast<PointerType>(I->getType());
- const Type *ElementTy = Ty->getElementType();
- unsigned FrameAlign = getByValTypeAlignment(ElementTy);
- unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
- // For ByVal, alignment should be passed from FE. BE will guess if
- // this info is not there but there are cases it cannot get right.
- if (F.getParamAlignment(j))
- FrameAlign = F.getParamAlignment(j);
- Flags.setByValAlign(FrameAlign);
- Flags.setByValSize(FrameSize);
- }
- if (F.paramHasAttr(j, Attribute::Nest))
- Flags.setNest();
- Flags.setOrigAlign(OriginalAlignment);
-
- MVT RegisterVT = getRegisterType(VT);
- unsigned NumRegs = getNumRegisters(VT);
- for (unsigned i = 0; i != NumRegs; ++i) {
- RetVals.push_back(RegisterVT);
- ISD::ArgFlagsTy MyFlags = Flags;
- if (NumRegs > 1 && i == 0)
- MyFlags.setSplit();
- // if it isn't first piece, alignment must be 1
- else if (i > 0)
- MyFlags.setOrigAlign(1);
- Ops.push_back(DAG.getArgFlags(MyFlags));
- }
- }
- }
-
- RetVals.push_back(MVT::Other);
-
- // Create the node.
- SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, dl,
- DAG.getVTList(&RetVals[0], RetVals.size()),
- &Ops[0], Ops.size()).getNode();
-
- // Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but
- // allows exposing the loads that may be part of the argument access to the
- // first DAGCombiner pass.
- SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);
-
- // The number of results should match up, except that the lowered one may have
- // an extra flag result.
- assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
- (Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
- TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
- && "Lowering produced unexpected number of results!");
-
- // The FORMAL_ARGUMENTS node itself is likely no longer needed.
- if (Result != TmpRes.getNode() && Result->use_empty()) {
- HandleSDNode Dummy(DAG.getRoot());
- DAG.RemoveDeadNode(Result);
- }
-
- Result = TmpRes.getNode();
-
- unsigned NumArgRegs = Result->getNumValues() - 1;
- DAG.setRoot(SDValue(Result, NumArgRegs));
-
- // Set up the return result vector.
- unsigned i = 0;
- unsigned Idx = 1;
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
- ++I, ++Idx) {
- SmallVector<MVT, 4> ValueVTs;
- ComputeValueVTs(*this, I->getType(), ValueVTs);
- for (unsigned Value = 0, NumValues = ValueVTs.size();
- Value != NumValues; ++Value) {
- MVT VT = ValueVTs[Value];
- MVT PartVT = getRegisterType(VT);
-
- unsigned NumParts = getNumRegisters(VT);
- SmallVector<SDValue, 4> Parts(NumParts);
- for (unsigned j = 0; j != NumParts; ++j)
- Parts[j] = SDValue(Result, i++);
-
- ISD::NodeType AssertOp = ISD::DELETED_NODE;
- if (F.paramHasAttr(Idx, Attribute::SExt))
- AssertOp = ISD::AssertSext;
- else if (F.paramHasAttr(Idx, Attribute::ZExt))
- AssertOp = ISD::AssertZext;
-
- ArgValues.push_back(getCopyFromParts(DAG, dl, &Parts[0], NumParts,
- PartVT, VT, AssertOp));
- }
- }
- assert(i == NumArgRegs && "Argument register count mismatch!");
-}
-
-
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
-/// implementation, which just inserts an ISD::CALL node, which is later custom
-/// lowered by the target to something concrete. FIXME: When all targets are
-/// migrated to using ISD::CALL, this hook should be integrated into SDISel.
+/// implementation, which just calls LowerCall.
+/// FIXME: When all targets are
+/// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
bool RetSExt, bool RetZExt, bool isVarArg,
bool isInreg, unsigned NumFixedArgs,
- unsigned CallingConv, bool isTailCall,
+ CallingConv::ID CallConv, bool isTailCall,
+ bool isReturnValueUsed,
SDValue Callee,
ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
+
assert((!isTailCall || PerformTailCallOpt) &&
"isTailCall set when tail-call optimizations are disabled!");
- SmallVector<SDValue, 32> Ops;
- Ops.push_back(Chain); // Op#0 - Chain
- Ops.push_back(Callee);
-
// Handle all of the outgoing arguments.
+ SmallVector<ISD::OutputArg, 32> Outs;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
- MVT VT = ValueVTs[Value];
- const Type *ArgTy = VT.getTypeForMVT();
+ EVT VT = ValueVTs[Value];
+ const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
@@ -5707,8 +5634,8 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
Flags.setNest();
Flags.setOrigAlign(OriginalAlignment);
- MVT PartVT = getRegisterType(VT);
- unsigned NumParts = getNumRegisters(VT);
+ EVT PartVT = getRegisterType(RetTy->getContext(), VT);
+ unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
SmallVector<SDValue, 4> Parts(NumParts);
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
@@ -5719,75 +5646,105 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
- for (unsigned i = 0; i != NumParts; ++i) {
+ for (unsigned j = 0; j != NumParts; ++j) {
      // if it isn't the first piece, alignment must be 1
- ISD::ArgFlagsTy MyFlags = Flags;
- if (NumParts > 1 && i == 0)
- MyFlags.setSplit();
- else if (i != 0)
- MyFlags.setOrigAlign(1);
-
- Ops.push_back(Parts[i]);
- Ops.push_back(DAG.getArgFlags(MyFlags));
+ ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
+ if (NumParts > 1 && j == 0)
+ MyFlags.Flags.setSplit();
+ else if (j != 0)
+ MyFlags.Flags.setOrigAlign(1);
+
+ Outs.push_back(MyFlags);
}
}
}
- // Figure out the result value types. We start by making a list of
- // the potentially illegal return value types.
- SmallVector<MVT, 4> LoweredRetTys;
- SmallVector<MVT, 4> RetTys;
+ // Handle the incoming return values from the call.
+ SmallVector<ISD::InputArg, 32> Ins;
+ SmallVector<EVT, 4> RetTys;
ComputeValueVTs(*this, RetTy, RetTys);
-
- // Then we translate that to a list of legal types.
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
- MVT VT = RetTys[I];
- MVT RegisterVT = getRegisterType(VT);
- unsigned NumRegs = getNumRegisters(VT);
- for (unsigned i = 0; i != NumRegs; ++i)
- LoweredRetTys.push_back(RegisterVT);
- }
-
- LoweredRetTys.push_back(MVT::Other); // Always has a chain.
-
- // Create the CALL node.
- SDValue Res = DAG.getCall(CallingConv, dl,
- isVarArg, isTailCall, isInreg,
- DAG.getVTList(&LoweredRetTys[0],
- LoweredRetTys.size()),
- &Ops[0], Ops.size(), NumFixedArgs
- );
- Chain = Res.getValue(LoweredRetTys.size() - 1);
-
- // Gather up the call result into a single value.
- if (RetTy != Type::VoidTy && !RetTys.empty()) {
- ISD::NodeType AssertOp = ISD::DELETED_NODE;
-
- if (RetSExt)
- AssertOp = ISD::AssertSext;
- else if (RetZExt)
- AssertOp = ISD::AssertZext;
-
- SmallVector<SDValue, 4> ReturnValues;
- unsigned RegNo = 0;
- for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
- MVT VT = RetTys[I];
- MVT RegisterVT = getRegisterType(VT);
- unsigned NumRegs = getNumRegisters(VT);
- unsigned RegNoEnd = NumRegs + RegNo;
- SmallVector<SDValue, 4> Results;
- for (; RegNo != RegNoEnd; ++RegNo)
- Results.push_back(Res.getValue(RegNo));
- SDValue ReturnValue =
- getCopyFromParts(DAG, dl, &Results[0], NumRegs, RegisterVT, VT,
- AssertOp);
- ReturnValues.push_back(ReturnValue);
+ EVT VT = RetTys[I];
+ EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
+ unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ ISD::InputArg MyFlags;
+ MyFlags.VT = RegisterVT;
+ MyFlags.Used = isReturnValueUsed;
+ if (RetSExt)
+ MyFlags.Flags.setSExt();
+ if (RetZExt)
+ MyFlags.Flags.setZExt();
+ if (isInreg)
+ MyFlags.Flags.setInReg();
+ Ins.push_back(MyFlags);
}
- Res = DAG.getNode(ISD::MERGE_VALUES, dl,
- DAG.getVTList(&RetTys[0], RetTys.size()),
- &ReturnValues[0], ReturnValues.size());
}
+ // Check if target-dependent constraints permit a tail call here.
+ // Target-independent constraints should be checked by the caller.
+ if (isTailCall &&
+ !IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
+ isTailCall = false;
+
+ SmallVector<SDValue, 4> InVals;
+ Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
+ Outs, Ins, dl, DAG, InVals);
+
+ // Verify that the target's LowerCall behaved as expected.
+ assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
+ "LowerCall didn't return a valid chain!");
+ assert((!isTailCall || InVals.empty()) &&
+ "LowerCall emitted a return value for a tail call!");
+ assert((isTailCall || InVals.size() == Ins.size()) &&
+ "LowerCall didn't emit the correct number of values!");
+ DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ assert(InVals[i].getNode() &&
+ "LowerCall emitted a null value!");
+ assert(Ins[i].VT == InVals[i].getValueType() &&
+ "LowerCall emitted a value with the wrong type!");
+ });
+
+ // For a tail call, the return value is merely live-out and there aren't
+ // any nodes in the DAG representing it. Return a special value to
+ // indicate that a tail call has been emitted and no more Instructions
+ // should be processed in the current block.
+ if (isTailCall) {
+ DAG.setRoot(Chain);
+ return std::make_pair(SDValue(), SDValue());
+ }
+
+ // Collect the legal value parts into potentially illegal values
+ // that correspond to the original function's return values.
+ ISD::NodeType AssertOp = ISD::DELETED_NODE;
+ if (RetSExt)
+ AssertOp = ISD::AssertSext;
+ else if (RetZExt)
+ AssertOp = ISD::AssertZext;
+ SmallVector<SDValue, 4> ReturnValues;
+ unsigned CurReg = 0;
+ for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
+ EVT VT = RetTys[I];
+ EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
+ unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
+
+ SDValue ReturnValue =
+ getCopyFromParts(DAG, dl, &InVals[CurReg], NumRegs, RegisterVT, VT,
+ AssertOp);
+ ReturnValues.push_back(ReturnValue);
+ CurReg += NumRegs;
+ }
+
+ // For a function returning void, there is no return value. We can't create
+ // such a node, so we just return a null return value in that case; nothing
+ // will actually look at the value.
+ if (ReturnValues.empty())
+ return std::make_pair(SDValue(), Chain);
+
+ SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
+ DAG.getVTList(&RetTys[0], RetTys.size()),
+ &ReturnValues[0], ReturnValues.size());
+
return std::make_pair(Res, Chain);
}
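
A hedged sketch, not part of this patch, of what a target-side LowerCall override is now expected to do under the contract asserted above: thread everything through the chain, and either populate InVals with exactly one value per Ins entry or, for a tail call, return with InVals left empty. "MyTarget" is hypothetical, and the parameter list here simply mirrors the call site above; the authoritative prototype is the virtual declared in TargetLowering.h.

    SDValue
    MyTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                                CallingConv::ID CallConv, bool isVarArg,
                                bool isTailCall,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                DebugLoc dl, SelectionDAG &DAG,
                                SmallVectorImpl<SDValue> &InVals) {
      // 1. Analyze Outs against the calling convention, copy arguments into
      //    registers or stack slots, and emit the call node, updating Chain.
      // 2. For a tail call, return the chain with InVals empty; the generic
      //    code above then reports a null pair and the builder sets HasTailCall.
      if (isTailCall)
        return Chain;
      // 3. Otherwise copy each return register back out: one SDValue per Ins
      //    entry, with exactly Ins[i].VT, so the DEBUG asserts above hold.
      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
        unsigned RetReg = 0; // placeholder: chosen by the CC analysis in step 1
        SDValue Val = DAG.getCopyFromReg(Chain, dl, RetReg, Ins[i].VT);
        Chain = Val.getValue(1);
        InVals.push_back(Val);
      }
      return Chain;
    }
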
@@ -5800,8 +5757,7 @@ void TargetLowering::LowerOperationWrapper(SDNode *N,
}
SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
- assert(0 && "LowerOperation not implemented for this target!");
- abort();
+ llvm_unreachable("LowerOperation not implemented for this target!");
return SDValue();
}
@@ -5813,7 +5769,7 @@ void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
"Copy from a reg to the same reg!");
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
- RegsForValue RFV(TLI, Reg, V->getType());
+ RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
SDValue Chain = DAG.getEntryNode();
RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
PendingExports.push_back(Chain);
@@ -5825,25 +5781,122 @@ void SelectionDAGISel::
LowerArguments(BasicBlock *LLVMBB) {
// If this is the entry block, emit arguments.
Function &F = *LLVMBB->getParent();
- SDValue OldRoot = SDL->DAG.getRoot();
- SmallVector<SDValue, 16> Args;
- TLI.LowerArguments(F, SDL->DAG, Args, SDL->getCurDebugLoc());
-
- unsigned a = 0;
- for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
- AI != E; ++AI) {
- SmallVector<MVT, 4> ValueVTs;
- ComputeValueVTs(TLI, AI->getType(), ValueVTs);
+ SelectionDAG &DAG = SDL->DAG;
+ SDValue OldRoot = DAG.getRoot();
+ DebugLoc dl = SDL->getCurDebugLoc();
+ const TargetData *TD = TLI.getTargetData();
+
+ // Set up the incoming argument description vector.
+ SmallVector<ISD::InputArg, 16> Ins;
+ unsigned Idx = 1;
+ for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
+ I != E; ++I, ++Idx) {
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(TLI, I->getType(), ValueVTs);
+ bool isArgValueUsed = !I->use_empty();
+ for (unsigned Value = 0, NumValues = ValueVTs.size();
+ Value != NumValues; ++Value) {
+ EVT VT = ValueVTs[Value];
+ const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
+ ISD::ArgFlagsTy Flags;
+ unsigned OriginalAlignment =
+ TD->getABITypeAlignment(ArgTy);
+
+ if (F.paramHasAttr(Idx, Attribute::ZExt))
+ Flags.setZExt();
+ if (F.paramHasAttr(Idx, Attribute::SExt))
+ Flags.setSExt();
+ if (F.paramHasAttr(Idx, Attribute::InReg))
+ Flags.setInReg();
+ if (F.paramHasAttr(Idx, Attribute::StructRet))
+ Flags.setSRet();
+ if (F.paramHasAttr(Idx, Attribute::ByVal)) {
+ Flags.setByVal();
+ const PointerType *Ty = cast<PointerType>(I->getType());
+ const Type *ElementTy = Ty->getElementType();
+ unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
+ unsigned FrameSize = TD->getTypeAllocSize(ElementTy);
+ // For ByVal, alignment should be passed from FE. BE will guess if
+ // this info is not there but there are cases it cannot get right.
+ if (F.getParamAlignment(Idx))
+ FrameAlign = F.getParamAlignment(Idx);
+ Flags.setByValAlign(FrameAlign);
+ Flags.setByValSize(FrameSize);
+ }
+ if (F.paramHasAttr(Idx, Attribute::Nest))
+ Flags.setNest();
+ Flags.setOrigAlign(OriginalAlignment);
+
+ EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
+ unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
+ if (NumRegs > 1 && i == 0)
+ MyFlags.Flags.setSplit();
+ // if it isn't the first piece, alignment must be 1
+ else if (i > 0)
+ MyFlags.Flags.setOrigAlign(1);
+ Ins.push_back(MyFlags);
+ }
+ }
+ }
+
+ // Call the target to set up the argument values.
+ SmallVector<SDValue, 8> InVals;
+ SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
+ F.isVarArg(), Ins,
+ dl, DAG, InVals);
+
+ // Verify that the target's LowerFormalArguments behaved as expected.
+ assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
+ "LowerFormalArguments didn't return a valid chain!");
+ assert(InVals.size() == Ins.size() &&
+ "LowerFormalArguments didn't emit the correct number of values!");
+ DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ assert(InVals[i].getNode() &&
+ "LowerFormalArguments emitted a null value!");
+ assert(Ins[i].VT == InVals[i].getValueType() &&
+ "LowerFormalArguments emitted a value with the wrong type!");
+ });
+
+ // Update the DAG with the new chain value resulting from argument lowering.
+ DAG.setRoot(NewRoot);
+
+ // Set up the argument values.
+ unsigned i = 0;
+ Idx = 1;
+ for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
+ ++I, ++Idx) {
+ SmallVector<SDValue, 4> ArgValues;
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(TLI, I->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
- if (!AI->use_empty()) {
- SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues,
- SDL->getCurDebugLoc()));
+ for (unsigned Value = 0; Value != NumValues; ++Value) {
+ EVT VT = ValueVTs[Value];
+ EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
+ unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
+
+ if (!I->use_empty()) {
+ ISD::NodeType AssertOp = ISD::DELETED_NODE;
+ if (F.paramHasAttr(Idx, Attribute::SExt))
+ AssertOp = ISD::AssertSext;
+ else if (F.paramHasAttr(Idx, Attribute::ZExt))
+ AssertOp = ISD::AssertZext;
+
+ ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
+ PartVT, VT, AssertOp));
+ }
+ i += NumParts;
+ }
+ if (!I->use_empty()) {
+ SDL->setValue(I, DAG.getMergeValues(&ArgValues[0], NumValues,
+ SDL->getCurDebugLoc()));
// If this argument is live outside of the entry block, insert a copy from
      // wherever we got it to the vreg that other BB's will reference it as.
- SDL->CopyToExportRegsIfNeeded(AI);
+ SDL->CopyToExportRegsIfNeeded(I);
}
- a += NumValues;
}
+ assert(i == InVals.size() && "Argument register count mismatch!");
// Finally, if the target has anything special to do, allow it to do so.
// FIXME: this should insert code into the DAG!
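
The companion hook for incoming arguments, sketched here under the same hedging: the contract asserted above is that the target returns a valid chain (which becomes the new DAG root) and pushes one SDValue of exactly Ins[i].VT per entry. "MyTarget" is again hypothetical; the parameter order mirrors the TLI.LowerFormalArguments call above.

    SDValue
    MyTargetLowering::LowerFormalArguments(SDValue Chain,
                                           CallingConv::ID CallConv, bool isVarArg,
                                           const SmallVectorImpl<ISD::InputArg> &Ins,
                                           DebugLoc dl, SelectionDAG &DAG,
                                           SmallVectorImpl<SDValue> &InVals) {
      // Run the calling-convention analysis over Ins, then materialize each
      // incoming value as a CopyFromReg or a load from a fixed frame index,
      // pushing the results in the same order as Ins.
      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
        unsigned ArgReg = 0; // placeholder: a vreg the CC analysis made live-in
        SDValue Val = DAG.getCopyFromReg(Chain, dl, ArgReg, Ins[i].VT);
        Chain = Val.getValue(1);
        InVals.push_back(Val);
      }
      return Chain;
    }
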
@@ -5908,11 +5961,11 @@ SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
      // Remember that this register needs to be added to the machine PHI node as
// the input for this MBB.
- SmallVector<MVT, 4> ValueVTs;
+ SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
- MVT VT = ValueVTs[vti];
- unsigned NumRegisters = TLI.getNumRegisters(VT);
+ EVT VT = ValueVTs[vti];
+ unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
for (unsigned i = 0, e = NumRegisters; i != e; ++i)
SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
Reg += NumRegisters;
@@ -5962,11 +6015,11 @@ SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
    // own moves. Second, this check is necessary because FastISel doesn't
// use CreateRegForValue to create registers, so it always creates
// exactly one register for each non-void instruction.
- MVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
+ EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
// Promote MVT::i1.
if (VT == MVT::i1)
- VT = TLI.getTypeToTransformTo(VT);
+ VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
else {
SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
return false;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.h
index 057c841..06acc8a 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.h
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.h
@@ -15,6 +15,7 @@
#define SELECTIONDAGBUILD_H
#include "llvm/Constants.h"
+#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#ifndef NDEBUG
@@ -23,6 +24,7 @@
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/CallSite.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
#include <vector>
#include <set>
@@ -75,8 +77,6 @@ class TruncInst;
class UIToFPInst;
class UnreachableInst;
class UnwindInst;
-class VICmpInst;
-class VFCmpInst;
class VAArgInst;
class ZExtInst;
@@ -117,7 +117,7 @@ public:
SmallSet<Instruction*, 8> CatchInfoFound;
#endif
- unsigned MakeReg(MVT VT);
+ unsigned MakeReg(EVT VT);
/// isExportedInst - Return true if the specified value is an instruction
/// exported from its block.
@@ -345,9 +345,15 @@ public:
/// BitTestCases - Vector of BitTestBlock structures used to communicate
/// SwitchInst code generation information.
std::vector<BitTestBlock> BitTestCases;
-
+
+ /// PHINodesToUpdate - A list of phi instructions whose operand list will
+ /// be updated after processing the current basic block.
std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
+ /// EdgeMapping - If an edge from CurMBB to any MBB is changed (e.g. due to
+ /// scheduler custom lowering), track the change here.
+ DenseMap<MachineBasicBlock*, MachineBasicBlock*> EdgeMapping;
+
// Emit PHI-node-operand constants only once even if used by multiple
// PHI nodes.
DenseMap<Constant*, unsigned> ConstantsOut;
@@ -363,11 +369,21 @@ public:
/// GFI - Garbage collection metadata for the function.
GCFunctionInfo *GFI;
+ /// HasTailCall - This is set to true if a call in the current
+ /// block has been translated as a tail call. In this case,
+ /// no subsequent DAG nodes should be created.
+ ///
+ bool HasTailCall;
+
+ LLVMContext *Context;
+
SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
FunctionLoweringInfo &funcinfo,
CodeGenOpt::Level ol)
: CurDebugLoc(DebugLoc::getUnknownLoc()),
- TLI(tli), DAG(dag), FuncInfo(funcinfo), OptLevel(ol) {
+ TLI(tli), DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
+ HasTailCall(false),
+ Context(dag.getContext()) {
}
void init(GCFunctionInfo *gfi, AliasAnalysis &aa);
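
A hedged illustration of how the new HasTailCall flag above is meant to be honored by the per-block driver (sketch only, not the exact loop in SelectionDAGISel.cpp): once a call has been lowered as a tail call, no further DAG nodes may be built for that block.

    // Illustrative only: stop visiting instructions after an emitted tail call.
    for (BasicBlock::iterator I = Begin; I != End; ++I) {
      SDL->visit(*I);
      if (SDL->HasTailCall)
        break; // LowerCallTo already set the DAG root; nothing may follow
    }
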
@@ -489,8 +505,6 @@ private:
void visitAShr(User &I) { visitShift(I, ISD::SRA); }
void visitICmp(User &I);
void visitFCmp(User &I);
- void visitVICmp(User &I);
- void visitVFCmp(User &I);
// Visit the conversion instructions
void visitTrunc(User &I);
void visitZExt(User &I);
@@ -539,12 +553,10 @@ private:
void visitVACopy(CallInst &I);
void visitUserOp1(Instruction &I) {
- assert(0 && "UserOp1 should not exist at instruction selection time!");
- abort();
+ llvm_unreachable("UserOp1 should not exist at instruction selection time!");
}
void visitUserOp2(Instruction &I) {
- assert(0 && "UserOp2 should not exist at instruction selection time!");
- abort();
+ llvm_unreachable("UserOp2 should not exist at instruction selection time!");
}
const char *implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 9d72a12..ae98da5 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -16,6 +16,7 @@
#include "SelectionDAGBuild.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
@@ -29,6 +30,7 @@
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
@@ -47,8 +49,10 @@
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
@@ -150,12 +154,15 @@ namespace llvm {
// insert. The specified MachineInstr is created but not inserted into any
// basic blocks, and the scheduler passes ownership of it to this method.
MachineBasicBlock *TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
- cerr << "If a target marks an instruction with "
- << "'usesCustomDAGSchedInserter', it must implement "
- << "TargetLowering::EmitInstrWithCustomInserter!\n";
- abort();
- return 0;
+ MachineBasicBlock *MBB,
+ DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
+#ifndef NDEBUG
+ errs() << "If a target marks an instruction with "
+ "'usesCustomDAGSchedInserter', it must implement "
+ "TargetLowering::EmitInstrWithCustomInserter!";
+#endif
+ llvm_unreachable(0);
+ return 0;
}
/// EmitLiveInCopy - Emit a copy for a live in physical register. If the
@@ -215,8 +222,11 @@ static void EmitLiveInCopy(MachineBasicBlock *MBB,
--Pos;
}
- TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
- CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
+ bool Emitted = TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
+ assert(Emitted && "Unable to issue a live-in copy instruction!\n");
+ (void) Emitted;
+
+ CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
if (Coalesced) {
if (&*InsertPos == UseMI) ++InsertPos;
MBB->erase(UseMI);
@@ -247,8 +257,10 @@ static void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
E = MRI.livein_end(); LI != E; ++LI)
if (LI->second) {
const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
- TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
- LI->second, LI->first, RC, RC);
+ bool Emitted = TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
+ LI->second, LI->first, RC, RC);
+ assert(Emitted && "Unable to issue a live-in copy instruction!\n");
+ (void) Emitted;
}
}
}
@@ -258,7 +270,7 @@ static void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
//===----------------------------------------------------------------------===//
SelectionDAGISel::SelectionDAGISel(TargetMachine &tm, CodeGenOpt::Level OL) :
- FunctionPass(&ID), TM(tm), TLI(*tm.getTargetLowering()),
+ MachineFunctionPass(&ID), TM(tm), TLI(*tm.getTargetLowering()),
FuncInfo(new FunctionLoweringInfo(TLI)),
CurDAG(new SelectionDAG(TLI, *FuncInfo)),
SDL(new SelectionDAGLowering(*CurDAG, TLI, *FuncInfo, OL)),
@@ -273,44 +285,42 @@ SelectionDAGISel::~SelectionDAGISel() {
delete FuncInfo;
}
-unsigned SelectionDAGISel::MakeReg(MVT VT) {
+unsigned SelectionDAGISel::MakeReg(EVT VT) {
return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}
void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<AliasAnalysis>();
AU.addRequired<GCModuleInfo>();
+ AU.addPreserved<GCModuleInfo>();
AU.addRequired<DwarfWriter>();
- AU.setPreservesAll();
+ AU.addPreserved<DwarfWriter>();
+ MachineFunctionPass::getAnalysisUsage(AU);
}
-bool SelectionDAGISel::runOnFunction(Function &Fn) {
+bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
+ Function &Fn = *mf.getFunction();
+
// Do some sanity-checking on the command-line options.
assert((!EnableFastISelVerbose || EnableFastISel) &&
"-fast-isel-verbose requires -fast-isel");
assert((!EnableFastISelAbort || EnableFastISel) &&
"-fast-isel-abort requires -fast-isel");
- // Do not codegen any 'available_externally' functions at all, they have
- // definitions outside the translation unit.
- if (Fn.hasAvailableExternallyLinkage())
- return false;
-
-
// Get alias analysis for load/store combining.
AA = &getAnalysis<AliasAnalysis>();
- TargetMachine &TM = TLI.getTargetMachine();
- MF = &MachineFunction::construct(&Fn, TM);
+ MF = &mf;
const TargetInstrInfo &TII = *TM.getInstrInfo();
const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
- if (MF->getFunction()->hasGC())
- GFI = &getAnalysis<GCModuleInfo>().getFunctionInfo(*MF->getFunction());
+ if (Fn.hasGC())
+ GFI = &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn);
else
GFI = 0;
RegInfo = &MF->getRegInfo();
- DOUT << "\n\n\n=== " << Fn.getName() << "\n";
+ DEBUG(errs() << "\n\n\n=== " << Fn.getName() << "\n");
MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>();
DwarfWriter *DW = getAnalysisIfAvailable<DwarfWriter>();
@@ -358,140 +368,50 @@ static void copyCatchInfo(BasicBlock *SrcBB, BasicBlock *DestBB,
}
}
-/// IsFixedFrameObjectWithPosOffset - Check if object is a fixed frame object and
-/// whether object offset >= 0.
-static bool
-IsFixedFrameObjectWithPosOffset(MachineFrameInfo *MFI, SDValue Op) {
- if (!isa<FrameIndexSDNode>(Op)) return false;
-
- FrameIndexSDNode * FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op);
- int FrameIdx = FrameIdxNode->getIndex();
- return MFI->isFixedObjectIndex(FrameIdx) &&
- MFI->getObjectOffset(FrameIdx) >= 0;
-}
-
-/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could
-/// possibly be overwritten when lowering the outgoing arguments in a tail
-/// call. Currently the implementation of this call is very conservative and
-/// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with
-/// virtual registers would be overwritten by direct lowering.
-static bool IsPossiblyOverwrittenArgumentOfTailCall(SDValue Op,
- MachineFrameInfo *MFI) {
- RegisterSDNode * OpReg = NULL;
- if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
- (Op.getOpcode()== ISD::CopyFromReg &&
- (OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
- (OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
- (Op.getOpcode() == ISD::LOAD &&
- IsFixedFrameObjectWithPosOffset(MFI, Op.getOperand(1))) ||
- (Op.getOpcode() == ISD::MERGE_VALUES &&
- Op.getOperand(Op.getResNo()).getOpcode() == ISD::LOAD &&
- IsFixedFrameObjectWithPosOffset(MFI, Op.getOperand(Op.getResNo()).
- getOperand(1))))
- return true;
- return false;
-}
-
-/// CheckDAGForTailCallsAndFixThem - This Function looks for CALL nodes in the
-/// DAG and fixes their tailcall attribute operand.
-static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG,
- const TargetLowering& TLI) {
- SDNode * Ret = NULL;
- SDValue Terminator = DAG.getRoot();
-
- // Find RET node.
- if (Terminator.getOpcode() == ISD::RET) {
- Ret = Terminator.getNode();
- }
-
- // Fix tail call attribute of CALL nodes.
- for (SelectionDAG::allnodes_iterator BE = DAG.allnodes_begin(),
- BI = DAG.allnodes_end(); BI != BE; ) {
- --BI;
- if (CallSDNode *TheCall = dyn_cast<CallSDNode>(BI)) {
- SDValue OpRet(Ret, 0);
- SDValue OpCall(BI, 0);
- bool isMarkedTailCall = TheCall->isTailCall();
- // If CALL node has tail call attribute set to true and the call is not
- // eligible (no RET or the target rejects) the attribute is fixed to
- // false. The TargetLowering::IsEligibleForTailCallOptimization function
- // must correctly identify tail call optimizable calls.
- if (!isMarkedTailCall) continue;
- if (Ret==NULL ||
- !TLI.IsEligibleForTailCallOptimization(TheCall, OpRet, DAG)) {
- // Not eligible. Mark CALL node as non tail call. Note that we
- // can modify the call node in place since calls are not CSE'd.
- TheCall->setNotTailCall();
- } else {
- // Look for tail call clobbered arguments. Emit a series of
- // copyto/copyfrom virtual register nodes to protect them.
- SmallVector<SDValue, 32> Ops;
- SDValue Chain = TheCall->getChain(), InFlag;
- Ops.push_back(Chain);
- Ops.push_back(TheCall->getCallee());
- for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; ++i) {
- SDValue Arg = TheCall->getArg(i);
- bool isByVal = TheCall->getArgFlags(i).isByVal();
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- if (!isByVal &&
- IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
- MVT VT = Arg.getValueType();
- unsigned VReg = MF.getRegInfo().
- createVirtualRegister(TLI.getRegClassFor(VT));
- Chain = DAG.getCopyToReg(Chain, Arg.getDebugLoc(),
- VReg, Arg, InFlag);
- InFlag = Chain.getValue(1);
- Arg = DAG.getCopyFromReg(Chain, Arg.getDebugLoc(),
- VReg, VT, InFlag);
- Chain = Arg.getValue(1);
- InFlag = Arg.getValue(2);
- }
- Ops.push_back(Arg);
- Ops.push_back(TheCall->getArgFlagsVal(i));
- }
- // Link in chain of CopyTo/CopyFromReg.
- Ops[0] = Chain;
- DAG.UpdateNodeOperands(OpCall, Ops.begin(), Ops.size());
- }
- }
- }
-}
-
void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB,
BasicBlock::iterator Begin,
BasicBlock::iterator End) {
SDL->setCurrentBasicBlock(BB);
-
- // Lower all of the non-terminator instructions.
- for (BasicBlock::iterator I = Begin; I != End; ++I)
+ MetadataContext &TheMetadata = LLVMBB->getParent()->getContext().getMetadata();
+ unsigned MDDbgKind = TheMetadata.getMDKind("dbg");
+
+ // Lower all of the non-terminator instructions. If a call is emitted
+ // as a tail call, cease emitting nodes for this block.
+ for (BasicBlock::iterator I = Begin; I != End && !SDL->HasTailCall; ++I) {
+ if (MDDbgKind) {
+ // Update DebugLoc if debug information is attached with this
+ // instruction.
+ if (MDNode *Dbg = TheMetadata.getMD(MDDbgKind, I)) {
+ DILocation DILoc(Dbg);
+ DebugLoc Loc = ExtractDebugLocation(DILoc, MF->getDebugLocInfo());
+ SDL->setCurDebugLoc(Loc);
+ if (MF->getDefaultDebugLoc().isUnknown())
+ MF->setDefaultDebugLoc(Loc);
+ }
+ }
if (!isa<TerminatorInst>(I))
SDL->visit(*I);
+ }
- // Ensure that all instructions which are used outside of their defining
- // blocks are available as virtual registers. Invoke is handled elsewhere.
- for (BasicBlock::iterator I = Begin; I != End; ++I)
- if (!isa<PHINode>(I) && !isa<InvokeInst>(I))
- SDL->CopyToExportRegsIfNeeded(I);
+ if (!SDL->HasTailCall) {
+ // Ensure that all instructions which are used outside of their defining
+ // blocks are available as virtual registers. Invoke is handled elsewhere.
+ for (BasicBlock::iterator I = Begin; I != End; ++I)
+ if (!isa<PHINode>(I) && !isa<InvokeInst>(I))
+ SDL->CopyToExportRegsIfNeeded(I);
- // Handle PHI nodes in successor blocks.
- if (End == LLVMBB->end()) {
- HandlePHINodesInSuccessorBlocks(LLVMBB);
+ // Handle PHI nodes in successor blocks.
+ if (End == LLVMBB->end()) {
+ HandlePHINodesInSuccessorBlocks(LLVMBB);
- // Lower the terminator after the copies are emitted.
- SDL->visit(*LLVMBB->getTerminator());
+ // Lower the terminator after the copies are emitted.
+ SDL->visit(*LLVMBB->getTerminator());
+ }
}
-
+
// Make sure the root of the DAG is up-to-date.
CurDAG->setRoot(SDL->getControlRoot());
- // Check whether calls in this block are real tail calls. Fix up CALL nodes
- // with correct tailcall attribute so that the target can rely on the tailcall
- // attribute indicating whether the call is really eligible for tail call
- // optimization.
- if (PerformTailCallOpt)
- CheckDAGForTailCallsAndFixThem(*CurDAG, TLI);
-
// Final step, emit the lowered DAG as machine code.
CodeGenAndEmitDAG();
SDL->clear();
@@ -500,51 +420,51 @@ void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB,
void SelectionDAGISel::ComputeLiveOutVRegInfo() {
SmallPtrSet<SDNode*, 128> VisitedNodes;
SmallVector<SDNode*, 128> Worklist;
-
+
Worklist.push_back(CurDAG->getRoot().getNode());
-
+
APInt Mask;
APInt KnownZero;
APInt KnownOne;
-
+
while (!Worklist.empty()) {
SDNode *N = Worklist.back();
Worklist.pop_back();
-
+
// If we've already seen this node, ignore it.
if (!VisitedNodes.insert(N))
continue;
-
+
// Otherwise, add all chain operands to the worklist.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
if (N->getOperand(i).getValueType() == MVT::Other)
Worklist.push_back(N->getOperand(i).getNode());
-
+
// If this is a CopyToReg with a vreg dest, process it.
if (N->getOpcode() != ISD::CopyToReg)
continue;
-
+
unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
if (!TargetRegisterInfo::isVirtualRegister(DestReg))
continue;
-
+
// Ignore non-scalar or non-integer values.
SDValue Src = N->getOperand(2);
- MVT SrcVT = Src.getValueType();
+ EVT SrcVT = Src.getValueType();
if (!SrcVT.isInteger() || SrcVT.isVector())
continue;
-
+
unsigned NumSignBits = CurDAG->ComputeNumSignBits(Src);
Mask = APInt::getAllOnesValue(SrcVT.getSizeInBits());
CurDAG->ComputeMaskedBits(Src, Mask, KnownZero, KnownOne);
-
+
// Only install this information if it tells us something.
if (NumSignBits != 1 || KnownZero != 0 || KnownOne != 0) {
DestReg -= TargetRegisterInfo::FirstVirtualRegister;
- FunctionLoweringInfo &FLI = CurDAG->getFunctionLoweringInfo();
- if (DestReg >= FLI.LiveOutRegInfo.size())
- FLI.LiveOutRegInfo.resize(DestReg+1);
- FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[DestReg];
+ if (DestReg >= FuncInfo->LiveOutRegInfo.size())
+ FuncInfo->LiveOutRegInfo.resize(DestReg+1);
+ FunctionLoweringInfo::LiveOutInfo &LOI =
+ FuncInfo->LiveOutRegInfo[DestReg];
LOI.NumSignBits = NumSignBits;
LOI.KnownOne = KnownOne;
LOI.KnownZero = KnownZero;
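
The guard above only records live-out info that improves on the defaults (one sign bit, nothing known). A hedged worked example, assuming the copied value is a sign-extension from i8 to i32:

    #include "llvm/ADT/APInt.h"

    // Illustrative only: (sign_extend i8 -> i32) has 25 sign bits (the sign
    // bit plus 24 copies) and no individually known bits, which still passes
    // the test above and earns a LiveOutRegInfo entry.
    static bool liveOutInfoIsInteresting() {
      unsigned NumSignBits = 25;
      llvm::APInt KnownZero(32, 0), KnownOne(32, 0);
      return NumSignBits != 1 || KnownZero != 0 || KnownOne != 0; // true
    }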
@@ -560,10 +480,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewLegalizeDAGs ||
ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
ViewSUnitDAGs)
- BlockName = CurDAG->getMachineFunction().getFunction()->getName() + ':' +
- BB->getBasicBlock()->getName();
+ BlockName = MF->getFunction()->getNameStr() + ":" +
+ BB->getBasicBlock()->getNameStr();
- DOUT << "Initial selection DAG:\n";
+ DEBUG(errs() << "Initial selection DAG:\n");
DEBUG(CurDAG->dump());
if (ViewDAGCombine1) CurDAG->viewGraph("dag-combine1 input for " + BlockName);
@@ -575,10 +495,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
} else {
CurDAG->Combine(Unrestricted, *AA, OptLevel);
}
-
- DOUT << "Optimized lowered selection DAG:\n";
+
+ DEBUG(errs() << "Optimized lowered selection DAG:\n");
DEBUG(CurDAG->dump());
-
+
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
if (!DisableLegalizeTypes) {
@@ -593,7 +513,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
Changed = CurDAG->LegalizeTypes();
}
- DOUT << "Type-legalized selection DAG:\n";
+ DEBUG(errs() << "Type-legalized selection DAG:\n");
DEBUG(CurDAG->dump());
if (Changed) {
@@ -608,7 +528,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
}
- DOUT << "Optimized type-legalized selection DAG:\n";
+ DEBUG(errs() << "Optimized type-legalized selection DAG:\n");
DEBUG(CurDAG->dump());
}
@@ -638,11 +558,11 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
}
- DOUT << "Optimized vector-legalized selection DAG:\n";
+ DEBUG(errs() << "Optimized vector-legalized selection DAG:\n");
DEBUG(CurDAG->dump());
}
}
-
+
if (ViewLegalizeDAGs) CurDAG->viewGraph("legalize input for " + BlockName);
if (TimePassesIsEnabled) {
@@ -651,10 +571,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
} else {
CurDAG->Legalize(DisableLegalizeTypes, OptLevel);
}
-
- DOUT << "Legalized selection DAG:\n";
+
+ DEBUG(errs() << "Legalized selection DAG:\n");
DEBUG(CurDAG->dump());
-
+
if (ViewDAGCombine2) CurDAG->viewGraph("dag-combine2 input for " + BlockName);
// Run the DAG combiner in post-legalize mode.
@@ -664,12 +584,12 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
} else {
CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
}
-
- DOUT << "Optimized legalized selection DAG:\n";
+
+ DEBUG(errs() << "Optimized legalized selection DAG:\n");
DEBUG(CurDAG->dump());
if (ViewISelDAGs) CurDAG->viewGraph("isel input for " + BlockName);
-
+
if (OptLevel != CodeGenOpt::None)
ComputeLiveOutVRegInfo();
@@ -682,7 +602,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
InstructionSelect();
}
- DOUT << "Selected selection DAG:\n";
+ DEBUG(errs() << "Selected selection DAG:\n");
DEBUG(CurDAG->dump());
if (ViewSchedDAGs) CurDAG->viewGraph("scheduler input for " + BlockName);
@@ -698,13 +618,13 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
if (ViewSUnitDAGs) Scheduler->viewGraph();
- // Emit machine code to BB. This can change 'BB' to the last block being
+ // Emit machine code to BB. This can change 'BB' to the last block being
// inserted into.
if (TimePassesIsEnabled) {
NamedRegionTimer T("Instruction Creation", GroupName);
- BB = Scheduler->EmitSchedule();
+ BB = Scheduler->EmitSchedule(&SDL->EdgeMapping);
} else {
- BB = Scheduler->EmitSchedule();
+ BB = Scheduler->EmitSchedule(&SDL->EdgeMapping);
}
// Free the scheduler state.
@@ -715,9 +635,9 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
delete Scheduler;
}
- DOUT << "Selected machine code:\n";
+ DEBUG(errs() << "Selected machine code:\n");
DEBUG(BB->dump());
-}
+}
void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
MachineFunction &MF,
@@ -736,6 +656,9 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
#endif
);
+ MetadataContext &TheMetadata = Fn.getContext().getMetadata();
+ unsigned MDDbgKind = TheMetadata.getMDKind("dbg");
+
// Iterate over all basic blocks in the function.
for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
BasicBlock *LLVMBB = &*I;
@@ -758,7 +681,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
I != E; ++I, ++j)
if (Fn.paramHasAttr(j, Attribute::ByVal)) {
if (EnableFastISelVerbose || EnableFastISelAbort)
- cerr << "FastISel skips entry block due to byval argument\n";
+ errs() << "FastISel skips entry block due to byval argument\n";
SuppressFastISel = true;
break;
}
@@ -818,16 +741,29 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
FastIS->startNewBlock(BB);
// Do FastISel on as many instructions as possible.
for (; BI != End; ++BI) {
+ if (MDDbgKind) {
+ // Update DebugLoc if debug information is attached with this
+ // instruction.
+ if (MDNode *Dbg = TheMetadata.getMD(MDDbgKind, BI)) {
+ DILocation DILoc(Dbg);
+ DebugLoc Loc = ExtractDebugLocation(DILoc,
+ MF.getDebugLocInfo());
+ FastIS->setCurDebugLoc(Loc);
+ if (MF.getDefaultDebugLoc().isUnknown())
+ MF.setDefaultDebugLoc(Loc);
+ }
+ }
+
// Just before the terminator instruction, insert instructions to
// feed PHI nodes in successor blocks.
if (isa<TerminatorInst>(BI))
if (!HandlePHINodesInSuccessorBlocksFast(LLVMBB, FastIS)) {
if (EnableFastISelVerbose || EnableFastISelAbort) {
- cerr << "FastISel miss: ";
+ errs() << "FastISel miss: ";
BI->dump();
}
- if (EnableFastISelAbort)
- assert(0 && "FastISel didn't handle a PHI in a successor");
+ assert(!EnableFastISelAbort &&
+ "FastISel didn't handle a PHI in a successor");
break;
}
@@ -842,11 +778,11 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
// Then handle certain instructions as single-LLVM-Instruction blocks.
if (isa<CallInst>(BI)) {
if (EnableFastISelVerbose || EnableFastISelAbort) {
- cerr << "FastISel missed call: ";
+ errs() << "FastISel missed call: ";
BI->dump();
}
- if (BI->getType() != Type::VoidTy) {
+ if (BI->getType() != Type::getVoidTy(*CurDAG->getContext())) {
unsigned &R = FuncInfo->ValueMap[BI];
if (!R)
R = FuncInfo->CreateRegForValue(BI);
@@ -864,13 +800,13 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
// For now, be a little lenient about non-branch terminators.
if (!isa<TerminatorInst>(BI) || isa<BranchInst>(BI)) {
if (EnableFastISelVerbose || EnableFastISelAbort) {
- cerr << "FastISel miss: ";
+ errs() << "FastISel miss: ";
BI->dump();
}
if (EnableFastISelAbort)
// The "fast" selector couldn't handle something and bailed.
// For the purpose of debugging, just abort.
- assert(0 && "FastISel didn't select the entire block");
+ llvm_unreachable("FastISel didn't select the entire block");
}
break;
}
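
The loop in the hunks above boils down to a two-tier strategy: fast-isel each instruction, and on the first miss hand the rest of the block to the full SelectionDAG path. A simplified sketch of that shape (it omits the verbose/abort options and the special handling of calls and terminators):

    // Fragment, not a drop-in replacement; BI/End and the members are the
    // ones already used in SelectAllBasicBlocks.
    for (; BI != End; ++BI) {
      if (FastIS->SelectInstruction(BI))   // fast path handled it
        continue;
      SelectBasicBlock(LLVMBB, BI, End);   // DAG path for the remainder
      break;
    }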
@@ -895,15 +831,16 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
void
SelectionDAGISel::FinishBasicBlock() {
- DOUT << "Target-post-processed machine code:\n";
+ DEBUG(errs() << "Target-post-processed machine code:\n");
DEBUG(BB->dump());
- DOUT << "Total amount of phi nodes to update: "
- << SDL->PHINodesToUpdate.size() << "\n";
+ DEBUG(errs() << "Total amount of phi nodes to update: "
+ << SDL->PHINodesToUpdate.size() << "\n");
DEBUG(for (unsigned i = 0, e = SDL->PHINodesToUpdate.size(); i != e; ++i)
- DOUT << "Node " << i << " : (" << SDL->PHINodesToUpdate[i].first
- << ", " << SDL->PHINodesToUpdate[i].second << ")\n";);
-
+ errs() << "Node " << i << " : ("
+ << SDL->PHINodesToUpdate[i].first
+ << ", " << SDL->PHINodesToUpdate[i].second << ")\n");
+
// Next, now that we know what the last MBB the LLVM BB expanded is, update
// PHI nodes in successors.
if (SDL->SwitchCases.empty() &&
@@ -932,7 +869,7 @@ SelectionDAGISel::FinishBasicBlock() {
CurDAG->setRoot(SDL->getRoot());
CodeGenAndEmitDAG();
SDL->clear();
- }
+ }
for (unsigned j = 0, ej = SDL->BitTestCases[i].Cases.size(); j != ej; ++j) {
// Set the current basic block to the mbb we wish to insert the code into
@@ -947,8 +884,8 @@ SelectionDAGISel::FinishBasicBlock() {
SDL->visitBitTestCase(SDL->BitTestCases[i].Default,
SDL->BitTestCases[i].Reg,
SDL->BitTestCases[i].Cases[j]);
-
-
+
+
CurDAG->setRoot(SDL->getRoot());
CodeGenAndEmitDAG();
SDL->clear();
@@ -1001,7 +938,7 @@ SelectionDAGISel::FinishBasicBlock() {
CodeGenAndEmitDAG();
SDL->clear();
}
-
+
// Set the current basic block to the mbb we wish to insert the code into
BB = SDL->JTCases[i].second.MBB;
SDL->setCurrentBasicBlock(BB);
@@ -1010,7 +947,7 @@ SelectionDAGISel::FinishBasicBlock() {
CurDAG->setRoot(SDL->getRoot());
CodeGenAndEmitDAG();
SDL->clear();
-
+
// Update PHI Nodes
for (unsigned pi = 0, pe = SDL->PHINodesToUpdate.size(); pi != pe; ++pi) {
MachineInstr *PHI = SDL->PHINodesToUpdate[pi].first;
@@ -1019,20 +956,21 @@ SelectionDAGISel::FinishBasicBlock() {
"This is not a machine PHI node that we are updating!");
// "default" BB. We can go there only from header BB.
if (PHIBB == SDL->JTCases[i].second.Default) {
- PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second,
- false));
- PHI->addOperand(MachineOperand::CreateMBB(SDL->JTCases[i].first.HeaderBB));
+ PHI->addOperand
+ (MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second, false));
+ PHI->addOperand
+ (MachineOperand::CreateMBB(SDL->JTCases[i].first.HeaderBB));
}
// JT BB. Just iterate over successors here
if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) {
- PHI->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second,
- false));
+ PHI->addOperand
+ (MachineOperand::CreateReg(SDL->PHINodesToUpdate[pi].second, false));
PHI->addOperand(MachineOperand::CreateMBB(BB));
}
}
}
SDL->JTCases.clear();
-
+
// If the switch block involved a branch to one of the actual successors, we
// need to update PHI nodes in that block.
for (unsigned i = 0, e = SDL->PHINodesToUpdate.size(); i != e; ++i) {
@@ -1045,25 +983,31 @@ SelectionDAGISel::FinishBasicBlock() {
PHI->addOperand(MachineOperand::CreateMBB(BB));
}
}
-
+
// If we generated any switch lowering information, build and codegen any
// additional DAGs necessary.
for (unsigned i = 0, e = SDL->SwitchCases.size(); i != e; ++i) {
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDL->SwitchCases[i].ThisBB;
+ MachineBasicBlock *ThisBB = BB = SDL->SwitchCases[i].ThisBB;
SDL->setCurrentBasicBlock(BB);
-
+
// Emit the code
SDL->visitSwitchCase(SDL->SwitchCases[i]);
CurDAG->setRoot(SDL->getRoot());
CodeGenAndEmitDAG();
- SDL->clear();
-
+
// Handle any PHI nodes in successors of this chunk, as if we were coming
// from the original BB before switch expansion. Note that PHI nodes can
// occur multiple times in PHINodesToUpdate. We have to be very careful to
// handle them the right number of times.
while ((BB = SDL->SwitchCases[i].TrueBB)) { // Handle LHS and RHS.
+ // If new BB's are created during scheduling, the edges may have been
+ // updated. That is, the edge from ThisBB to BB may have been split and
+ // BB's predecessor is now another block.
+ DenseMap<MachineBasicBlock*, MachineBasicBlock*>::iterator EI =
+ SDL->EdgeMapping.find(BB);
+ if (EI != SDL->EdgeMapping.end())
+ ThisBB = EI->second;
for (MachineBasicBlock::iterator Phi = BB->begin();
Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI; ++Phi){
// This value for this PHI node is recorded in PHINodesToUpdate, get it.
@@ -1073,21 +1017,22 @@ SelectionDAGISel::FinishBasicBlock() {
if (SDL->PHINodesToUpdate[pn].first == Phi) {
Phi->addOperand(MachineOperand::CreateReg(SDL->PHINodesToUpdate[pn].
second, false));
- Phi->addOperand(MachineOperand::CreateMBB(SDL->SwitchCases[i].ThisBB));
+ Phi->addOperand(MachineOperand::CreateMBB(ThisBB));
break;
}
}
}
-
+
// Don't process RHS if same block as LHS.
if (BB == SDL->SwitchCases[i].FalseBB)
SDL->SwitchCases[i].FalseBB = 0;
-
+
// If we haven't handled the RHS, do so now. Otherwise, we're done.
SDL->SwitchCases[i].TrueBB = SDL->SwitchCases[i].FalseBB;
SDL->SwitchCases[i].FalseBB = 0;
}
assert(SDL->SwitchCases[i].TrueBB == 0 && SDL->SwitchCases[i].FalseBB == 0);
+ SDL->clear();
}
SDL->SwitchCases.clear();
@@ -1101,12 +1046,12 @@ SelectionDAGISel::FinishBasicBlock() {
///
ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();
-
+
if (!Ctor) {
Ctor = ISHeuristic;
RegisterScheduler::setDefault(Ctor);
}
-
+
return Ctor(this, OptLevel);
}
@@ -1123,25 +1068,25 @@ ScheduleHazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
/// the dag combiner simplified the 255, we still want to match. RHS is the
/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
-bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
+bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const {
const APInt &ActualMask = RHS->getAPIntValue();
const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
-
+
// If the actual mask exactly matches, success!
if (ActualMask == DesiredMask)
return true;
-
+
// If the actual AND mask is allowing unallowed bits, this doesn't match.
if (ActualMask.intersects(~DesiredMask))
return false;
-
+
// Otherwise, the DAG Combiner may have proven that the value coming in is
// either already zero or is not demanded. Check for known zero input bits.
APInt NeededMask = DesiredMask & ~ActualMask;
if (CurDAG->MaskedValueIsZero(LHS, NeededMask))
return true;
-
+
// TODO: check to see if missing bits are just not demanded.
// Otherwise, this pattern doesn't match.
@@ -1152,32 +1097,32 @@ bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
/// the dag combiner simplified the 255, we still want to match. RHS is the
/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
/// specified in the .td file (e.g. 255).
-bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
+bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const {
const APInt &ActualMask = RHS->getAPIntValue();
const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
-
+
// If the actual mask exactly matches, success!
if (ActualMask == DesiredMask)
return true;
-
+
// If the actual AND mask is allowing unallowed bits, this doesn't match.
if (ActualMask.intersects(~DesiredMask))
return false;
-
+
// Otherwise, the DAG Combiner may have proven that the value coming in is
// either already zero or is not demanded. Check for known zero input bits.
APInt NeededMask = DesiredMask & ~ActualMask;
-
+
APInt KnownZero, KnownOne;
CurDAG->ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);
-
+
// If all the missing bits in the or are already known to be set, match!
if ((NeededMask & KnownOne) == NeededMask)
return true;
-
+
// TODO: check to see if missing bits are just not demanded.
-
+
// Otherwise, this pattern doesn't match.
return false;
}
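
Both mask checks reduce to three APInt tests. A standalone sketch of the OR case, assuming the caller has already computed the known-one bits of the LHS (illustrative only, no DAG plumbing):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    static bool orMaskMatches(const APInt &ActualMask, const APInt &DesiredMask,
                              const APInt &KnownOne) {
      if (ActualMask == DesiredMask)                 // exact match
        return true;
      if (ActualMask.intersects(~DesiredMask))       // OR sets bits the pattern forbids
        return false;
      APInt NeededMask = DesiredMask & ~ActualMask;  // bits the OR no longer provides
      return (NeededMask & KnownOne) == NeededMask;  // fine if already known set
    }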
@@ -1196,7 +1141,7 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
unsigned i = 2, e = InOps.size();
if (InOps[e-1].getValueType() == MVT::Flag)
--e; // Don't process a flag operand if it is here.
-
+
while (i != e) {
unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
if ((Flags & 7) != 4 /*MEM*/) {
@@ -1210,25 +1155,25 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
// Otherwise, this is a memory operand. Ask the target to select it.
std::vector<SDValue> SelOps;
if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps)) {
- cerr << "Could not match memory address. Inline asm failure!\n";
- exit(1);
+ llvm_report_error("Could not match memory address. Inline asm"
+ " failure!");
}
-
+
// Add this to the output node.
- MVT IntPtrTy = CurDAG->getTargetLoweringInfo().getPointerTy();
+ EVT IntPtrTy = TLI.getPointerTy();
Ops.push_back(CurDAG->getTargetConstant(4/*MEM*/ | (SelOps.size()<< 3),
IntPtrTy));
Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
i += 2;
}
}
-
+
// Add the flag input back if present.
if (e != InOps.size())
Ops.push_back(InOps.back());
}
-/// findFlagUse - Return use of MVT::Flag value produced by the specified
+/// findFlagUse - Return use of EVT::Flag value produced by the specified
/// SDNode.
///
static SDNode *findFlagUse(SDNode *N) {
@@ -1331,7 +1276,7 @@ bool SelectionDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
// Fold. But since Fold and FU are flagged together, this will create
// a cycle in the scheduling graph.
- MVT VT = Root->getValueType(Root->getNumValues()-1);
+ EVT VT = Root->getValueType(Root->getNumValues()-1);
while (VT == MVT::Flag) {
SDNode *FU = findFlagUse(Root);
if (FU == NULL)
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index 6fd5df2..ccc5e3c 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -44,7 +44,7 @@ namespace llvm {
}
static std::string getEdgeDestLabel(const void *Node, unsigned i) {
- return ((const SDNode *) Node)->getValueType(i).getMVTString();
+ return ((const SDNode *) Node)->getValueType(i).getEVTString();
}
/// edgeTargetsEdgeSource - This method returns true if this outgoing edge
@@ -84,7 +84,7 @@ namespace llvm {
template<typename EdgeIter>
static std::string getEdgeAttributes(const void *Node, EdgeIter EI) {
SDValue Op = EI.getNode()->getOperand(EI.getOperand());
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
if (VT == MVT::Flag)
return "color=red,style=bold";
else if (VT == MVT::Other)
@@ -138,11 +138,11 @@ std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node,
void SelectionDAG::viewGraph(const std::string &Title) {
// This code is only for debugging!
#ifndef NDEBUG
- ViewGraph(this, "dag." + getMachineFunction().getFunction()->getName(), false,
- Title);
+ ViewGraph(this, "dag." + getMachineFunction().getFunction()->getNameStr(),
+ false, Title);
#else
- cerr << "SelectionDAG::viewGraph is only available in debug builds on "
- << "systems with Graphviz or gv!\n";
+ errs() << "SelectionDAG::viewGraph is only available in debug builds on "
+ << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
@@ -158,8 +158,8 @@ void SelectionDAG::clearGraphAttrs() {
#ifndef NDEBUG
NodeGraphAttrs.clear();
#else
- cerr << "SelectionDAG::clearGraphAttrs is only available in debug builds"
- << " on systems with Graphviz or gv!\n";
+ errs() << "SelectionDAG::clearGraphAttrs is only available in debug builds"
+ << " on systems with Graphviz or gv!\n";
#endif
}
@@ -170,8 +170,8 @@ void SelectionDAG::setGraphAttrs(const SDNode *N, const char *Attrs) {
#ifndef NDEBUG
NodeGraphAttrs[N] = Attrs;
#else
- cerr << "SelectionDAG::setGraphAttrs is only available in debug builds"
- << " on systems with Graphviz or gv!\n";
+ errs() << "SelectionDAG::setGraphAttrs is only available in debug builds"
+ << " on systems with Graphviz or gv!\n";
#endif
}
@@ -188,8 +188,8 @@ const std::string SelectionDAG::getGraphAttrs(const SDNode *N) const {
else
return "";
#else
- cerr << "SelectionDAG::getGraphAttrs is only available in debug builds"
- << " on systems with Graphviz or gv!\n";
+ errs() << "SelectionDAG::getGraphAttrs is only available in debug builds"
+ << " on systems with Graphviz or gv!\n";
return std::string("");
#endif
}
@@ -200,8 +200,8 @@ void SelectionDAG::setGraphColor(const SDNode *N, const char *Color) {
#ifndef NDEBUG
NodeGraphAttrs[N] = std::string("color=") + Color;
#else
- cerr << "SelectionDAG::setGraphColor is only available in debug builds"
- << " on systems with Graphviz or gv!\n";
+ errs() << "SelectionDAG::setGraphColor is only available in debug builds"
+ << " on systems with Graphviz or gv!\n";
#endif
}
@@ -216,7 +216,7 @@ bool SelectionDAG::setSubgraphColorHelper(SDNode *N, const char *Color, DenseSet
if (level >= 20) {
if (!printed) {
printed = true;
- DOUT << "setSubgraphColor hit max level\n";
+ DEBUG(errs() << "setSubgraphColor hit max level\n");
}
return true;
}
@@ -232,8 +232,8 @@ bool SelectionDAG::setSubgraphColorHelper(SDNode *N, const char *Color, DenseSet
}
}
#else
- cerr << "SelectionDAG::setSubgraphColor is only available in debug builds"
- << " on systems with Graphviz or gv!\n";
+ errs() << "SelectionDAG::setSubgraphColor is only available in debug builds"
+ << " on systems with Graphviz or gv!\n";
#endif
return hit_limit;
}
@@ -255,8 +255,8 @@ void SelectionDAG::setSubgraphColor(SDNode *N, const char *Color) {
}
#else
- cerr << "SelectionDAG::setSubgraphColor is only available in debug builds"
- << " on systems with Graphviz or gv!\n";
+ errs() << "SelectionDAG::setSubgraphColor is only available in debug builds"
+ << " on systems with Graphviz or gv!\n";
#endif
}
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 83357e0..a2baee4 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -11,18 +11,20 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtarget.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
@@ -239,12 +241,23 @@ static void InitLibcallNames(const char **Names) {
Names[RTLIB::UO_F64] = "__unorddf2";
Names[RTLIB::O_F32] = "__unordsf2";
Names[RTLIB::O_F64] = "__unorddf2";
+ Names[RTLIB::MEMCPY] = "memcpy";
+ Names[RTLIB::MEMMOVE] = "memmove";
+ Names[RTLIB::MEMSET] = "memset";
Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
}
+/// InitLibcallCallingConvs - Set default libcall CallingConvs.
+///
+static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
+ for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
+ CCs[i] = CallingConv::C;
+ }
+}
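
InitLibcallCallingConvs only installs the C default; a backend that wants a different convention for a runtime routine is expected to override individual entries in its TargetLowering constructor. A hedged sketch, assuming setLibcallCallingConv is the accessor for the new LibcallCallingConvs table:

    // Inside a target's TargetLowering constructor (illustrative fragment):
    // route the memory libcalls added above through a target-specific CC.
    setLibcallCallingConv(RTLIB::MEMCPY,  CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMMOVE, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMSET,  CallingConv::ARM_AAPCS);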
+
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
-RTLIB::Libcall RTLIB::getFPEXT(MVT OpVT, MVT RetVT) {
+RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f32) {
if (RetVT == MVT::f64)
return FPEXT_F32_F64;
@@ -254,7 +267,7 @@ RTLIB::Libcall RTLIB::getFPEXT(MVT OpVT, MVT RetVT) {
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
-RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) {
+RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::f32) {
if (OpVT == MVT::f64)
return FPROUND_F64_F32;
@@ -273,7 +286,7 @@ RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) {
/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
-RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) {
+RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f32) {
if (RetVT == MVT::i8)
return FPTOSINT_F32_I8;
@@ -312,7 +325,7 @@ RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) {
/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
-RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) {
+RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::f32) {
if (RetVT == MVT::i8)
return FPTOUINT_F32_I8;
@@ -351,7 +364,7 @@ RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) {
/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
-RTLIB::Libcall RTLIB::getSINTTOFP(MVT OpVT, MVT RetVT) {
+RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::i32) {
if (RetVT == MVT::f32)
return SINTTOFP_I32_F32;
@@ -385,7 +398,7 @@ RTLIB::Libcall RTLIB::getSINTTOFP(MVT OpVT, MVT RetVT) {
/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
-RTLIB::Libcall RTLIB::getUINTTOFP(MVT OpVT, MVT RetVT) {
+RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::i32) {
if (RetVT == MVT::f32)
return UINTTOFP_I32_F32;
@@ -439,8 +452,9 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
CCs[RTLIB::O_F64] = ISD::SETEQ;
}
-TargetLowering::TargetLowering(TargetMachine &tm)
- : TM(tm), TD(TM.getTargetData()) {
+/// NOTE: The constructor takes ownership of TLOF.
+TargetLowering::TargetLowering(TargetMachine &tm,TargetLoweringObjectFile *tlof)
+ : TM(tm), TD(TM.getTargetData()), TLOF(*tlof) {
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
memset(LoadExtActions, 0, sizeof(LoadExtActions));
@@ -490,12 +504,10 @@ TargetLowering::TargetLowering(TargetMachine &tm)
IsLittleEndian = TD->isLittleEndian();
UsesGlobalOffsetTable = false;
- ShiftAmountTy = PointerTy = getValueType(TD->getIntPtrType());
- ShiftAmtHandling = Undefined;
+ ShiftAmountTy = PointerTy = MVT::getIntegerVT(8*TD->getPointerSize());
memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
- allowUnalignedMemoryAccesses = false;
benefitFromCodePlacementOpt = false;
UseUnderscoreSetJmp = false;
UseUnderscoreLongJmp = false;
@@ -515,14 +527,62 @@ TargetLowering::TargetLowering(TargetMachine &tm)
InitLibcallNames(LibcallRoutineNames);
InitCmpLibcallCCs(CmpLibcallCCs);
+ InitLibcallCallingConvs(LibcallCallingConvs);
// Tell Legalize whether the assembler supports DEBUG_LOC.
- const TargetAsmInfo *TASM = TM.getTargetAsmInfo();
+ const MCAsmInfo *TASM = TM.getMCAsmInfo();
if (!TASM || !TASM->hasDotLocAndDotFile())
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
}
-TargetLowering::~TargetLowering() {}
+TargetLowering::~TargetLowering() {
+ delete &TLOF;
+}
+
+static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
+ unsigned &NumIntermediates,
+ EVT &RegisterVT,
+ TargetLowering* TLI) {
+ // Figure out the right, legal destination reg to copy into.
+ unsigned NumElts = VT.getVectorNumElements();
+ MVT EltTy = VT.getVectorElementType();
+
+ unsigned NumVectorRegs = 1;
+
+ // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
+ // could break down into LHS/RHS like LegalizeDAG does.
+ if (!isPowerOf2_32(NumElts)) {
+ NumVectorRegs = NumElts;
+ NumElts = 1;
+ }
+
+ // Divide the input until we get to a supported size. This will always
+ // end with a scalar if the target doesn't support vectors.
+ while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
+ NumElts >>= 1;
+ NumVectorRegs <<= 1;
+ }
+
+ NumIntermediates = NumVectorRegs;
+
+ MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
+ if (!TLI->isTypeLegal(NewVT))
+ NewVT = EltTy;
+ IntermediateVT = NewVT;
+
+ EVT DestVT = TLI->getRegisterType(NewVT);
+ RegisterVT = DestVT;
+ if (EVT(DestVT).bitsLT(NewVT)) {
+ // Value is expanded, e.g. i64 -> i16.
+ return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
+ } else {
+ // Otherwise, promotion or legal types use the same number of registers as
+ // the vector decimated to the appropriate level.
+ return NumVectorRegs;
+ }
+
+ return 1;
+}
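
The halving loop is the core of this helper and of the public getVectorTypeBreakdown further down. A toy model that only tracks element counts shows the arithmetic, with a hypothetical isLegal predicate standing in for the target's legality query:

    #include <functional>

    // Illustrative only: split a NumElts-wide vector in half until isLegal()
    // accepts the piece, returning how many pieces that takes;
    // non-power-of-two inputs are scalarized up front.
    static unsigned countVectorParts(unsigned NumElts,
                                     const std::function<bool(unsigned)> &isLegal) {
      unsigned NumParts = 1;
      if (NumElts & (NumElts - 1)) { NumParts = NumElts; NumElts = 1; }
      while (NumElts > 1 && !isLegal(NumElts)) {
        NumElts >>= 1;
        NumParts <<= 1;
      }
      return NumParts;
    }
    // countVectorParts(8, [](unsigned n) { return n <= 4; }) == 2, matching
    // the v8f32 -> 2 x v4f32 example in getVectorTypeBreakdown's comment.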
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
@@ -546,13 +606,13 @@ void TargetLowering::computeRegisterProperties() {
// Every integer value type larger than this largest register takes twice as
// many registers to represent as the previous ValueType.
for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
- MVT EVT = (MVT::SimpleValueType)ExpandedReg;
- if (!EVT.isInteger())
+ EVT ExpandedVT = (MVT::SimpleValueType)ExpandedReg;
+ if (!ExpandedVT.isInteger())
break;
NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
- ValueTypeActions.setTypeAction(EVT, Expand);
+ ValueTypeActions.setTypeAction(ExpandedVT, Expand);
}
// Inspect all of the ValueType's smaller than the largest integer
@@ -560,7 +620,7 @@ void TargetLowering::computeRegisterProperties() {
unsigned LegalIntReg = LargestIntReg;
for (unsigned IntReg = LargestIntReg - 1;
IntReg >= (unsigned)MVT::i1; --IntReg) {
- MVT IVT = (MVT::SimpleValueType)IntReg;
+ EVT IVT = (MVT::SimpleValueType)IntReg;
if (isTypeLegal(IVT)) {
LegalIntReg = IntReg;
} else {
@@ -608,20 +668,20 @@ void TargetLowering::computeRegisterProperties() {
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
MVT VT = (MVT::SimpleValueType)i;
if (!isTypeLegal(VT)) {
- MVT IntermediateVT, RegisterVT;
+ MVT IntermediateVT;
+ EVT RegisterVT;
unsigned NumIntermediates;
NumRegistersForVT[i] =
- getVectorTypeBreakdown(VT,
- IntermediateVT, NumIntermediates,
- RegisterVT);
+ getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
+ RegisterVT, this);
RegisterTypeForVT[i] = RegisterVT;
// Determine if there is a legal wider type.
bool IsLegalWiderType = false;
- MVT EltVT = VT.getVectorElementType();
+ EVT EltVT = VT.getVectorElementType();
unsigned NElts = VT.getVectorNumElements();
for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- MVT SVT = (MVT::SimpleValueType)nVT;
+ EVT SVT = (MVT::SimpleValueType)nVT;
if (isTypeLegal(SVT) && SVT.getVectorElementType() == EltVT &&
SVT.getVectorNumElements() > NElts) {
TransformToType[i] = SVT;
@@ -631,7 +691,7 @@ void TargetLowering::computeRegisterProperties() {
}
}
if (!IsLegalWiderType) {
- MVT NVT = VT.getPow2VectorType();
+ EVT NVT = VT.getPow2VectorType();
if (NVT == VT) {
// Type is already a power of 2. The default action is to split.
TransformToType[i] = MVT::Other;
@@ -650,11 +710,10 @@ const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
}
-MVT TargetLowering::getSetCCResultType(MVT VT) const {
- return getValueType(TD->getIntPtrType());
+MVT::SimpleValueType TargetLowering::getSetCCResultType(EVT VT) const {
+ return PointerTy.SimpleTy;
}
-
/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
@@ -664,13 +723,13 @@ MVT TargetLowering::getSetCCResultType(MVT VT) const {
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
-unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
- MVT &IntermediateVT,
+unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
+ EVT &IntermediateVT,
unsigned &NumIntermediates,
- MVT &RegisterVT) const {
+ EVT &RegisterVT) const {
// Figure out the right, legal destination reg to copy into.
unsigned NumElts = VT.getVectorNumElements();
- MVT EltTy = VT.getVectorElementType();
+ EVT EltTy = VT.getVectorElementType();
unsigned NumVectorRegs = 1;
@@ -683,19 +742,20 @@ unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
- while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
+ while (NumElts > 1 && !isTypeLegal(
+ EVT::getVectorVT(Context, EltTy, NumElts))) {
NumElts >>= 1;
NumVectorRegs <<= 1;
}
NumIntermediates = NumVectorRegs;
- MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
+ EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
if (!isTypeLegal(NewVT))
NewVT = EltTy;
IntermediateVT = NewVT;
- MVT DestVT = getRegisterType(NewVT);
+ EVT DestVT = getRegisterType(Context, NewVT);
RegisterVT = DestVT;
if (DestVT.bitsLT(NewVT)) {
// Value is expanded, e.g. i64 -> i16.
@@ -714,7 +774,7 @@ unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
/// If there is no vector type that we want to widen to, returns MVT::Other
/// When and where to widen is target dependent based on the cost of
/// scalarizing vs using the wider vector type.
-MVT TargetLowering::getWidenVectorType(MVT VT) const {
+EVT TargetLowering::getWidenVectorType(EVT VT) const {
assert(VT.isVector());
if (isTypeLegal(VT))
return VT;
@@ -781,7 +841,7 @@ bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op,
// if we can expand it to have all bits set, do it
if (C->getAPIntValue().intersects(~Demanded)) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0),
DAG.getConstant(Demanded &
C->getAPIntValue(),
@@ -822,7 +882,7 @@ TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op,
if (!isPowerOf2_32(SmallVTBits))
SmallVTBits = NextPowerOf2(SmallVTBits);
for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
- MVT SmallVT = MVT::getIntegerVT(SmallVTBits);
+ EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
TLI.isZExtFree(SmallVT, Op.getValueType())) {
// We found a type with free casts.
@@ -1008,7 +1068,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
if ((KnownOne & KnownOne2) == KnownOne) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
Op.getOperand(0), ANDC));
@@ -1023,7 +1083,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// if we can expand it to have all bits set, do it
if (Expanded.isAllOnesValue()) {
if (Expanded != C->getAPIntValue()) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
TLO.DAG.getConstant(Expanded, VT));
return TLO.CombineTo(Op, New);
@@ -1099,7 +1159,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
SDValue NewSA =
TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT,
InOp.getOperand(0), NewSA));
}
@@ -1116,7 +1176,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
break;
case ISD::SRL:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
unsigned ShAmt = SA->getZExtValue();
unsigned VTSize = VT.getSizeInBits();
SDValue InOp = Op.getOperand(0);
@@ -1168,7 +1228,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
Op.getOperand(0), Op.getOperand(1)));
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- MVT VT = Op.getValueType();
+ EVT VT = Op.getValueType();
unsigned ShAmt = SA->getZExtValue();
// If the shift count is an invalid immediate, don't do anything.
@@ -1205,7 +1265,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
}
break;
case ISD::SIGN_EXTEND_INREG: {
- MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
// Sign extension. Compute the demanded bits in the result that are not
// present in the input.
@@ -1272,7 +1332,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
break;
}
case ISD::SIGN_EXTEND: {
- MVT InVT = Op.getOperand(0).getValueType();
+ EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getSizeInBits();
APInt InMask = APInt::getLowBitsSet(BitWidth, InBits);
APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
@@ -1371,7 +1431,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
break;
}
case ISD::AssertZext: {
- MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
APInt InMask = APInt::getLowBitsSet(BitWidth,
VT.getSizeInBits());
if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
@@ -1385,7 +1445,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
#if 0
// If this is an FP->Int bitcast and if the sign bit is the only thing that
// is demanded, turn this into a FGETSIGN.
- if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) &&
+ if (NewMask == EVT::getIntegerVTSignBit(Op.getValueType()) &&
MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
!MVT::isVector(Op.getOperand(0).getValueType())) {
// Only do this xform if FGETSIGN is valid or if before legalize.
@@ -1492,7 +1552,7 @@ static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
// to handle some common cases.
// Fall back to ComputeMaskedBits to catch other known cases.
- MVT OpVT = Val.getValueType();
+ EVT OpVT = Val.getValueType();
unsigned BitWidth = OpVT.getSizeInBits();
APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt KnownZero, KnownOne;
@@ -1504,10 +1564,11 @@ static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) {
/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDValue.
SDValue
-TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
+TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
DAGCombinerInfo &DCI, DebugLoc dl) const {
SelectionDAG &DAG = DCI.DAG;
+ LLVMContext &Context = *DAG.getContext();
// These setcc operations always fold.
switch (Cond) {
@@ -1518,316 +1579,321 @@ TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
case ISD::SETTRUE2: return DAG.getConstant(1, VT);
}
+ if (isa<ConstantSDNode>(N0.getNode())) {
+ // Ensure that the constant occurs on the RHS, and fold constant
+ // comparisons.
+ return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
+ }
+
if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
const APInt &C1 = N1C->getAPIntValue();
- if (isa<ConstantSDNode>(N0.getNode())) {
- return DAG.FoldSetCC(VT, N0, N1, Cond, dl);
- } else {
- // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
- // equality comparison, then we're just comparing whether X itself is
- // zero.
- if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
- N0.getOperand(0).getOpcode() == ISD::CTLZ &&
- N0.getOperand(1).getOpcode() == ISD::Constant) {
- unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
- if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
- ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
- if ((C1 == 0) == (Cond == ISD::SETEQ)) {
- // (srl (ctlz x), 5) == 0 -> X != 0
- // (srl (ctlz x), 5) != 1 -> X != 0
- Cond = ISD::SETNE;
- } else {
- // (srl (ctlz x), 5) != 0 -> X == 0
- // (srl (ctlz x), 5) == 1 -> X == 0
- Cond = ISD::SETEQ;
- }
- SDValue Zero = DAG.getConstant(0, N0.getValueType());
- return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
- Zero, Cond);
+
+ // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
+ // equality comparison, then we're just comparing whether X itself is
+ // zero.
+ if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
+ N0.getOperand(0).getOpcode() == ISD::CTLZ &&
+ N0.getOperand(1).getOpcode() == ISD::Constant) {
+ unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
+ if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+ ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
+ if ((C1 == 0) == (Cond == ISD::SETEQ)) {
+ // (srl (ctlz x), 5) == 0 -> X != 0
+ // (srl (ctlz x), 5) != 1 -> X != 0
+ Cond = ISD::SETNE;
+ } else {
+ // (srl (ctlz x), 5) != 0 -> X == 0
+ // (srl (ctlz x), 5) == 1 -> X == 0
+ Cond = ISD::SETEQ;
}
+ SDValue Zero = DAG.getConstant(0, N0.getValueType());
+ return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0),
+ Zero, Cond);
}
+ }
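
A quick check of the identity used in the block above, for i32: ctlz(0) is 32 and ctlz(x) <= 31 for any non-zero x, so the shifted count is 1 exactly when x is zero (illustrative sketch):

    #include <cstdint>

    static unsigned clz32(uint32_t x) {
      unsigned n = 32;
      while (x) { x >>= 1; --n; }
      return n;
    }
    // (clz32(x) >> 5) == 0  <=>  x != 0     (the compare-with-0 case)
    // (clz32(x) >> 5) == 1  <=>  x == 0     (the compare-with-1 case)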
- // If the LHS is '(and load, const)', the RHS is 0,
- // the test is for equality or unsigned, and all 1 bits of the const are
- // in the same partial word, see if we can shorten the load.
- if (DCI.isBeforeLegalize() &&
- N0.getOpcode() == ISD::AND && C1 == 0 &&
- N0.getNode()->hasOneUse() &&
- isa<LoadSDNode>(N0.getOperand(0)) &&
- N0.getOperand(0).getNode()->hasOneUse() &&
- isa<ConstantSDNode>(N0.getOperand(1))) {
- LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
- uint64_t bestMask = 0;
- unsigned bestWidth = 0, bestOffset = 0;
- if (!Lod->isVolatile() && Lod->isUnindexed() &&
- // FIXME: This uses getZExtValue() below so it only works on i64 and
- // below.
- N0.getValueType().getSizeInBits() <= 64) {
- unsigned origWidth = N0.getValueType().getSizeInBits();
- // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
- // 8 bits, but have to be careful...
- if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
- origWidth = Lod->getMemoryVT().getSizeInBits();
- uint64_t Mask =cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
- for (unsigned width = origWidth / 2; width>=8; width /= 2) {
- uint64_t newMask = (1ULL << width) - 1;
- for (unsigned offset=0; offset<origWidth/width; offset++) {
- if ((newMask & Mask) == Mask) {
- if (!TD->isLittleEndian())
- bestOffset = (origWidth/width - offset - 1) * (width/8);
- else
- bestOffset = (uint64_t)offset * (width/8);
- bestMask = Mask >> (offset * (width/8) * 8);
- bestWidth = width;
- break;
- }
- newMask = newMask << width;
+ // If the LHS is '(and load, const)', the RHS is 0,
+ // the test is for equality or unsigned, and all 1 bits of the const are
+ // in the same partial word, see if we can shorten the load.
+ if (DCI.isBeforeLegalize() &&
+ N0.getOpcode() == ISD::AND && C1 == 0 &&
+ N0.getNode()->hasOneUse() &&
+ isa<LoadSDNode>(N0.getOperand(0)) &&
+ N0.getOperand(0).getNode()->hasOneUse() &&
+ isa<ConstantSDNode>(N0.getOperand(1))) {
+ LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
+ uint64_t bestMask = 0;
+ unsigned bestWidth = 0, bestOffset = 0;
+ if (!Lod->isVolatile() && Lod->isUnindexed() &&
+ // FIXME: This uses getZExtValue() below so it only works on i64 and
+ // below.
+ N0.getValueType().getSizeInBits() <= 64) {
+ unsigned origWidth = N0.getValueType().getSizeInBits();
+ // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
+ // 8 bits, but have to be careful...
+ if (Lod->getExtensionType() != ISD::NON_EXTLOAD)
+ origWidth = Lod->getMemoryVT().getSizeInBits();
+ uint64_t Mask =cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
+ for (unsigned width = origWidth / 2; width>=8; width /= 2) {
+ uint64_t newMask = (1ULL << width) - 1;
+ for (unsigned offset=0; offset<origWidth/width; offset++) {
+ if ((newMask & Mask) == Mask) {
+ if (!TD->isLittleEndian())
+ bestOffset = (origWidth/width - offset - 1) * (width/8);
+ else
+ bestOffset = (uint64_t)offset * (width/8);
+ bestMask = Mask >> (offset * (width/8) * 8);
+ bestWidth = width;
+ break;
}
+ newMask = newMask << width;
}
}
- if (bestWidth) {
- MVT newVT = MVT::getIntegerVT(bestWidth);
- if (newVT.isRound()) {
- MVT PtrType = Lod->getOperand(1).getValueType();
- SDValue Ptr = Lod->getBasePtr();
- if (bestOffset != 0)
- Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
- DAG.getConstant(bestOffset, PtrType));
- unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
- SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
- Lod->getSrcValue(),
- Lod->getSrcValueOffset() + bestOffset,
- false, NewAlign);
- return DAG.getSetCC(dl, VT,
- DAG.getNode(ISD::AND, dl, newVT, NewLoad,
- DAG.getConstant(bestMask, newVT)),
- DAG.getConstant(0LL, newVT), Cond);
- }
+ }
+ if (bestWidth) {
+ EVT newVT = EVT::getIntegerVT(Context, bestWidth);
+ if (newVT.isRound()) {
+ EVT PtrType = Lod->getOperand(1).getValueType();
+ SDValue Ptr = Lod->getBasePtr();
+ if (bestOffset != 0)
+ Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(),
+ DAG.getConstant(bestOffset, PtrType));
+ unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset);
+ SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
+ Lod->getSrcValue(),
+ Lod->getSrcValueOffset() + bestOffset,
+ false, NewAlign);
+ return DAG.getSetCC(dl, VT,
+ DAG.getNode(ISD::AND, dl, newVT, NewLoad,
+ DAG.getConstant(bestMask, newVT)),
+ DAG.getConstant(0LL, newVT), Cond);
}
}
+ }
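
A worked example of the load narrowing above, assuming a little-endian target (illustrative only):

    //   setcc (and (load i32 %p), 0xFF00), 0, eq
    // every set bit of the mask sits in byte 1, so the search settles on
    // bestWidth = 8, bestOffset = 1, bestMask = 0xFF, and the comparison
    // becomes
    //   setcc (and (load i8 (%p + 1)), 0xFF), 0, eq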
- // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
- if (N0.getOpcode() == ISD::ZERO_EXTEND) {
- unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();
-
- // If the comparison constant has bits in the upper part, the
- // zero-extended value could never match.
- if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
- C1.getBitWidth() - InSize))) {
- switch (Cond) {
- case ISD::SETUGT:
- case ISD::SETUGE:
- case ISD::SETEQ: return DAG.getConstant(0, VT);
- case ISD::SETULT:
- case ISD::SETULE:
- case ISD::SETNE: return DAG.getConstant(1, VT);
- case ISD::SETGT:
- case ISD::SETGE:
- // True if the sign bit of C1 is set.
- return DAG.getConstant(C1.isNegative(), VT);
- case ISD::SETLT:
- case ISD::SETLE:
- // True if the sign bit of C1 isn't set.
- return DAG.getConstant(C1.isNonNegative(), VT);
- default:
- break;
- }
- }
+ // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
+ if (N0.getOpcode() == ISD::ZERO_EXTEND) {
+ unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();
- // Otherwise, we can perform the comparison with the low bits.
+ // If the comparison constant has bits in the upper part, the
+ // zero-extended value could never match.
+ if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
+ C1.getBitWidth() - InSize))) {
switch (Cond) {
- case ISD::SETEQ:
- case ISD::SETNE:
case ISD::SETUGT:
case ISD::SETUGE:
+ case ISD::SETEQ: return DAG.getConstant(0, VT);
case ISD::SETULT:
case ISD::SETULE:
- return DAG.getSetCC(dl, VT, N0.getOperand(0),
- DAG.getConstant(APInt(C1).trunc(InSize),
- N0.getOperand(0).getValueType()),
- Cond);
+ case ISD::SETNE: return DAG.getConstant(1, VT);
+ case ISD::SETGT:
+ case ISD::SETGE:
+ // True if the sign bit of C1 is set.
+ return DAG.getConstant(C1.isNegative(), VT);
+ case ISD::SETLT:
+ case ISD::SETLE:
+ // True if the sign bit of C1 isn't set.
+ return DAG.getConstant(C1.isNonNegative(), VT);
default:
- break; // todo, be more careful with signed comparisons
- }
- } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
- (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
- MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
- unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
- MVT ExtDstTy = N0.getValueType();
- unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
-
- // If the extended part has any inconsistent bits, it cannot ever
- // compare equal. In other words, they have to be all ones or all
- // zeros.
- APInt ExtBits =
- APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits);
- if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
- return DAG.getConstant(Cond == ISD::SETNE, VT);
-
- SDValue ZextOp;
- MVT Op0Ty = N0.getOperand(0).getValueType();
- if (Op0Ty == ExtSrcTy) {
- ZextOp = N0.getOperand(0);
- } else {
- APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
- ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
- DAG.getConstant(Imm, Op0Ty));
- }
- if (!DCI.isCalledByLegalizer())
- DCI.AddToWorklist(ZextOp.getNode());
- // Otherwise, make this a use of a zext.
- return DAG.getSetCC(dl, VT, ZextOp,
- DAG.getConstant(C1 & APInt::getLowBitsSet(
- ExtDstTyBits,
- ExtSrcTyBits),
- ExtDstTy),
- Cond);
- } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
- (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
-
- // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
- if (N0.getOpcode() == ISD::SETCC) {
- bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getZExtValue() != 1);
- if (TrueWhenTrue)
- return N0;
-
- // Invert the condition.
- ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
- CC = ISD::getSetCCInverse(CC,
- N0.getOperand(0).getValueType().isInteger());
- return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
- }
-
- if ((N0.getOpcode() == ISD::XOR ||
- (N0.getOpcode() == ISD::AND &&
- N0.getOperand(0).getOpcode() == ISD::XOR &&
- N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
- isa<ConstantSDNode>(N0.getOperand(1)) &&
- cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
- // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
- // can only do this if the top bits are known zero.
- unsigned BitWidth = N0.getValueSizeInBits();
- if (DAG.MaskedValueIsZero(N0,
- APInt::getHighBitsSet(BitWidth,
- BitWidth-1))) {
- // Okay, get the un-inverted input value.
- SDValue Val;
- if (N0.getOpcode() == ISD::XOR)
- Val = N0.getOperand(0);
- else {
- assert(N0.getOpcode() == ISD::AND &&
- N0.getOperand(0).getOpcode() == ISD::XOR);
- // ((X^1)&1)^1 -> X & 1
- Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
- N0.getOperand(0).getOperand(0),
- N0.getOperand(1));
- }
- return DAG.getSetCC(dl, VT, Val, N1,
- Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
- }
+ break;
}
}
+
+ // Otherwise, we can perform the comparison with the low bits.
+ switch (Cond) {
+ case ISD::SETEQ:
+ case ISD::SETNE:
+ case ISD::SETUGT:
+ case ISD::SETUGE:
+ case ISD::SETULT:
+ case ISD::SETULE: {
+ EVT newVT = N0.getOperand(0).getValueType();
+ if (DCI.isBeforeLegalizeOps() ||
+ (isOperationLegal(ISD::SETCC, newVT) &&
+ getCondCodeAction(Cond, newVT)==Legal))
+ return DAG.getSetCC(dl, VT, N0.getOperand(0),
+ DAG.getConstant(APInt(C1).trunc(InSize), newVT),
+ Cond);
+ break;
+ }
+ default:
+ break; // todo, be more careful with signed comparisons
+ }
+ } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
+ EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
+ unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
+ EVT ExtDstTy = N0.getValueType();
+ unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
+
+ // If the extended part has any inconsistent bits, it cannot ever
+ // compare equal. In other words, they have to be all ones or all
+ // zeros.
+ APInt ExtBits =
+ APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits);
+ if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
+ return DAG.getConstant(Cond == ISD::SETNE, VT);
- APInt MinVal, MaxVal;
- unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
- if (ISD::isSignedIntSetCC(Cond)) {
- MinVal = APInt::getSignedMinValue(OperandBitSize);
- MaxVal = APInt::getSignedMaxValue(OperandBitSize);
+ SDValue ZextOp;
+ EVT Op0Ty = N0.getOperand(0).getValueType();
+ if (Op0Ty == ExtSrcTy) {
+ ZextOp = N0.getOperand(0);
} else {
- MinVal = APInt::getMinValue(OperandBitSize);
- MaxVal = APInt::getMaxValue(OperandBitSize);
+ APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
+ ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0),
+ DAG.getConstant(Imm, Op0Ty));
}
-
- // Canonicalize GE/LE comparisons to use GT/LT comparisons.
- if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
- if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true
- // X >= C0 --> X > (C0-1)
- return DAG.getSetCC(dl, VT, N0,
- DAG.getConstant(C1-1, N1.getValueType()),
- (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
+ if (!DCI.isCalledByLegalizer())
+ DCI.AddToWorklist(ZextOp.getNode());
+ // Otherwise, make this a use of a zext.
+ return DAG.getSetCC(dl, VT, ZextOp,
+ DAG.getConstant(C1 & APInt::getLowBitsSet(
+ ExtDstTyBits,
+ ExtSrcTyBits),
+ ExtDstTy),
+ Cond);
+ } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
+ (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
+
+ // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
+ if (N0.getOpcode() == ISD::SETCC) {
+ bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getZExtValue() != 1);
+ if (TrueWhenTrue)
+ return N0;
+
+ // Invert the condition.
+ ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
+ CC = ISD::getSetCCInverse(CC,
+ N0.getOperand(0).getValueType().isInteger());
+ return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
}
-
- if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
- if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true
- // X <= C0 --> X < (C0+1)
- return DAG.getSetCC(dl, VT, N0,
- DAG.getConstant(C1+1, N1.getValueType()),
- (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
+
+ if ((N0.getOpcode() == ISD::XOR ||
+ (N0.getOpcode() == ISD::AND &&
+ N0.getOperand(0).getOpcode() == ISD::XOR &&
+ N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
+ isa<ConstantSDNode>(N0.getOperand(1)) &&
+ cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
+ // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
+ // can only do this if the top bits are known zero.
+ unsigned BitWidth = N0.getValueSizeInBits();
+ if (DAG.MaskedValueIsZero(N0,
+ APInt::getHighBitsSet(BitWidth,
+ BitWidth-1))) {
+ // Okay, get the un-inverted input value.
+ SDValue Val;
+ if (N0.getOpcode() == ISD::XOR)
+ Val = N0.getOperand(0);
+ else {
+ assert(N0.getOpcode() == ISD::AND &&
+ N0.getOperand(0).getOpcode() == ISD::XOR);
+ // ((X^1)&1)^1 -> X & 1
+ Val = DAG.getNode(ISD::AND, dl, N0.getValueType(),
+ N0.getOperand(0).getOperand(0),
+ N0.getOperand(1));
+ }
+ return DAG.getSetCC(dl, VT, Val, N1,
+ Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
+ }
}
+ }
+
+ APInt MinVal, MaxVal;
+ unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
+ if (ISD::isSignedIntSetCC(Cond)) {
+ MinVal = APInt::getSignedMinValue(OperandBitSize);
+ MaxVal = APInt::getSignedMaxValue(OperandBitSize);
+ } else {
+ MinVal = APInt::getMinValue(OperandBitSize);
+ MaxVal = APInt::getMaxValue(OperandBitSize);
+ }
- if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
- return DAG.getConstant(0, VT); // X < MIN --> false
- if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
- return DAG.getConstant(1, VT); // X >= MIN --> true
- if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
- return DAG.getConstant(0, VT); // X > MAX --> false
- if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
- return DAG.getConstant(1, VT); // X <= MAX --> true
-
- // Canonicalize setgt X, Min --> setne X, Min
- if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
- return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
- // Canonicalize setlt X, Max --> setne X, Max
- if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
- return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
-
- // If we have setult X, 1, turn it into seteq X, 0
- if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
- return DAG.getSetCC(dl, VT, N0,
- DAG.getConstant(MinVal, N0.getValueType()),
- ISD::SETEQ);
- // If we have setugt X, Max-1, turn it into seteq X, Max
- else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
- return DAG.getSetCC(dl, VT, N0,
- DAG.getConstant(MaxVal, N0.getValueType()),
- ISD::SETEQ);
-
- // If we have "setcc X, C0", check to see if we can shrink the immediate
- // by changing cc.
-
- // SETUGT X, SINTMAX -> SETLT X, 0
- if (Cond == ISD::SETUGT &&
- C1 == APInt::getSignedMaxValue(OperandBitSize))
- return DAG.getSetCC(dl, VT, N0,
- DAG.getConstant(0, N1.getValueType()),
- ISD::SETLT);
-
- // SETULT X, SINTMIN -> SETGT X, -1
- if (Cond == ISD::SETULT &&
- C1 == APInt::getSignedMinValue(OperandBitSize)) {
- SDValue ConstMinusOne =
- DAG.getConstant(APInt::getAllOnesValue(OperandBitSize),
- N1.getValueType());
- return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
- }
+ // Canonicalize GE/LE comparisons to use GT/LT comparisons.
+ if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
+ if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true
+ // X >= C0 --> X > (C0-1)
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getConstant(C1-1, N1.getValueType()),
+ (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
+ }
- // Fold bit comparisons when we can.
- if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
- VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
- if (ConstantSDNode *AndRHS =
- dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
- MVT ShiftTy = DCI.isBeforeLegalize() ?
- getPointerTy() : getShiftAmountTy();
- if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
- // Perform the xform if the AND RHS is a single bit.
- if (isPowerOf2_64(AndRHS->getZExtValue())) {
- return DAG.getNode(ISD::SRL, dl, VT, N0,
- DAG.getConstant(Log2_64(AndRHS->getZExtValue()),
- ShiftTy));
- }
- } else if (Cond == ISD::SETEQ && C1 == AndRHS->getZExtValue()) {
- // (X & 8) == 8 --> (X & 8) >> 3
- // Perform the xform if C1 is a single bit.
- if (C1.isPowerOf2()) {
- return DAG.getNode(ISD::SRL, dl, VT, N0,
- DAG.getConstant(C1.logBase2(), ShiftTy));
- }
+ if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
+ if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true
+ // X <= C0 --> X < (C0+1)
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getConstant(C1+1, N1.getValueType()),
+ (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
+ }
+
+ if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
+ return DAG.getConstant(0, VT); // X < MIN --> false
+ if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
+ return DAG.getConstant(1, VT); // X >= MIN --> true
+ if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
+ return DAG.getConstant(0, VT); // X > MAX --> false
+ if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
+ return DAG.getConstant(1, VT); // X <= MAX --> true
+
+ // Canonicalize setgt X, Min --> setne X, Min
+ if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
+ // Canonicalize setlt X, Max --> setne X, Max
+ if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE);
+
+ // If we have setult X, 1, turn it into seteq X, 0
+ if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getConstant(MinVal, N0.getValueType()),
+ ISD::SETEQ);
+ // If we have setugt X, Max-1, turn it into seteq X, Max
+ else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getConstant(MaxVal, N0.getValueType()),
+ ISD::SETEQ);
+
+ // If we have "setcc X, C0", check to see if we can shrink the immediate
+ // by changing cc.
+
+ // SETUGT X, SINTMAX -> SETLT X, 0
+ if (Cond == ISD::SETUGT &&
+ C1 == APInt::getSignedMaxValue(OperandBitSize))
+ return DAG.getSetCC(dl, VT, N0,
+ DAG.getConstant(0, N1.getValueType()),
+ ISD::SETLT);
+
+ // SETULT X, SINTMIN -> SETGT X, -1
+ if (Cond == ISD::SETULT &&
+ C1 == APInt::getSignedMinValue(OperandBitSize)) {
+ SDValue ConstMinusOne =
+ DAG.getConstant(APInt::getAllOnesValue(OperandBitSize),
+ N1.getValueType());
+ return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT);
+ }
+
+ // Fold bit comparisons when we can.
+ if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+ VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
+ if (ConstantSDNode *AndRHS =
+ dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
+ EVT ShiftTy = DCI.isBeforeLegalize() ?
+ getPointerTy() : getShiftAmountTy();
+ if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
+ // Perform the xform if the AND RHS is a single bit.
+ if (isPowerOf2_64(AndRHS->getZExtValue())) {
+ return DAG.getNode(ISD::SRL, dl, VT, N0,
+ DAG.getConstant(Log2_64(AndRHS->getZExtValue()),
+ ShiftTy));
+ }
+ } else if (Cond == ISD::SETEQ && C1 == AndRHS->getZExtValue()) {
+ // (X & 8) == 8 --> (X & 8) >> 3
+ // Perform the xform if C1 is a single bit.
+ if (C1.isPowerOf2()) {
+ return DAG.getNode(ISD::SRL, dl, VT, N0,
+ DAG.getConstant(C1.logBase2(), ShiftTy));
}
}
- }
- } else if (isa<ConstantSDNode>(N0.getNode())) {
- // Ensure that the constant occurs on the RHS.
- return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
+ }
}
if (isa<ConstantFPSDNode>(N0.getNode())) {
@@ -1840,7 +1906,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
if (CFP->getValueAPF().isNaN()) {
// If an operand is known to be a nan, we can fold it.
switch (ISD::getUnorderedFlavor(Cond)) {
- default: assert(0 && "Unknown flavor!");
+ default: llvm_unreachable("Unknown flavor!");
case 0: // Known false.
return DAG.getConstant(0, VT);
case 1: // Known true.
@@ -1856,6 +1922,43 @@ TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
// materialize 0.0.
if (Cond == ISD::SETO || Cond == ISD::SETUO)
return DAG.getSetCC(dl, VT, N0, N0, Cond);
+
+ // If the condition is not legal, see if we can find an equivalent one
+ // which is legal.
+ if (!isCondCodeLegal(Cond, N0.getValueType())) {
+ // If the comparison was an awkward floating-point == or != and one of
+ // the comparison operands is infinity or negative infinity, convert the
+ // condition to a less-awkward <= or >=.
+ if (CFP->getValueAPF().isInfinity()) {
+ if (CFP->getValueAPF().isNegative()) {
+ if (Cond == ISD::SETOEQ &&
+ isCondCodeLegal(ISD::SETOLE, N0.getValueType()))
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
+ if (Cond == ISD::SETUEQ &&
+ isCondCodeLegal(ISD::SETOLE, N0.getValueType()))
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
+ if (Cond == ISD::SETUNE &&
+ isCondCodeLegal(ISD::SETUGT, N0.getValueType()))
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
+ if (Cond == ISD::SETONE &&
+ isCondCodeLegal(ISD::SETUGT, N0.getValueType()))
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
+ } else {
+ if (Cond == ISD::SETOEQ &&
+ isCondCodeLegal(ISD::SETOGE, N0.getValueType()))
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
+ if (Cond == ISD::SETUEQ &&
+ isCondCodeLegal(ISD::SETOGE, N0.getValueType()))
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
+ if (Cond == ISD::SETUNE &&
+ isCondCodeLegal(ISD::SETULT, N0.getValueType()))
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
+ if (Cond == ISD::SETONE &&
+ isCondCodeLegal(ISD::SETULT, N0.getValueType()))
+ return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
+ }
+ }
+ }
}
if (N0 == N1) {
@@ -2000,7 +2103,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
SDValue Temp;
if (N0.getValueType() == MVT::i1 && foldBooleans) {
switch (Cond) {
- default: assert(0 && "Unknown integer setcc!");
+ default: llvm_unreachable("Unknown integer setcc!");
case ISD::SETEQ: // X == Y -> ~(X^Y)
Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
N0 = DAG.getNOT(dl, Temp, MVT::i1);
@@ -2090,7 +2193,7 @@ bool TargetLowering::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
const MachineFrameInfo *MFI) const {
if (LD->getChain() != Base->getChain())
return false;
- MVT VT = LD->getValueType(0);
+ EVT VT = LD->getValueType(0);
if (VT.getSizeInBits() / 8 != Bytes)
return false;
@@ -2171,7 +2274,7 @@ TargetLowering::getConstraintType(const std::string &Constraint) const {
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
-const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const{
+const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{
if (ConstraintVT.isInteger())
return "r";
if (ConstraintVT.isFloatingPoint())
@@ -2244,14 +2347,14 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const {
+ EVT VT) const {
return std::vector<unsigned>();
}
std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const {
+ EVT VT) const {
if (Constraint[0] != '{')
return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
@@ -2280,7 +2383,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I) {
- if (StringsEqualNoCase(RegName, RI->get(*I).AsmName))
+ if (StringsEqualNoCase(RegName, RI->getName(*I)))
return std::make_pair(*I, RC);
}
}
@@ -2310,7 +2413,7 @@ unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
/// is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
switch (CT) {
- default: assert(0 && "Unknown constraint type!");
+ default: llvm_unreachable("Unknown constraint type!");
case TargetLowering::C_Other:
case TargetLowering::C_Unknown:
return 0;
@@ -2406,10 +2509,13 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
// 'X' matches anything.
if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
// Labels and constants are handled elsewhere ('X' is the only thing
- // that matches labels).
- if (isa<BasicBlock>(OpInfo.CallOperandVal) ||
- isa<ConstantInt>(OpInfo.CallOperandVal))
+ // that matches labels). For Functions, the type here is the type of
+ // the result, which is not what we want to look at; leave them alone.
+ Value *v = OpInfo.CallOperandVal;
+ if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
+ OpInfo.CallOperandVal = v;
return;
+ }
// Otherwise, try to resolve it to something we know about by looking at
// the actual operand type.
@@ -2464,7 +2570,7 @@ bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl= N->getDebugLoc();
// Check to see if we can do this.
@@ -2521,7 +2627,7 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const {
- MVT VT = N->getValueType(0);
+ EVT VT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
// Check to see if we can do this.
@@ -2569,45 +2675,3 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
DAG.getConstant(magics.s-1, getShiftAmountTy()));
}
}
-
-/// IgnoreHarmlessInstructions - Ignore instructions between a CALL and RET
-/// node that don't prevent tail call optimization.
-static SDValue IgnoreHarmlessInstructions(SDValue node) {
- // Found call return.
- if (node.getOpcode() == ISD::CALL) return node;
- // Ignore MERGE_VALUES. Will have at least one operand.
- if (node.getOpcode() == ISD::MERGE_VALUES)
- return IgnoreHarmlessInstructions(node.getOperand(0));
- // Ignore ANY_EXTEND node.
- if (node.getOpcode() == ISD::ANY_EXTEND)
- return IgnoreHarmlessInstructions(node.getOperand(0));
- if (node.getOpcode() == ISD::TRUNCATE)
- return IgnoreHarmlessInstructions(node.getOperand(0));
- // Any other node type.
- return node;
-}
-
-bool TargetLowering::CheckTailCallReturnConstraints(CallSDNode *TheCall,
- SDValue Ret) {
- unsigned NumOps = Ret.getNumOperands();
- // ISD::CALL results:(value0, ..., valuen, chain)
- // ISD::RET operands:(chain, value0, flag0, ..., valuen, flagn)
- // Value return:
- // Check that operand of the RET node sources from the CALL node. The RET node
- // has at least two operands. Operand 0 holds the chain. Operand 1 holds the
- // value.
- // Also we need to check that there is no code in between the call and the
- // return. Hence we also check that the incomming chain to the return sources
- // from the outgoing chain of the call.
- if (NumOps > 1 &&
- IgnoreHarmlessInstructions(Ret.getOperand(1)) == SDValue(TheCall,0) &&
- Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1))
- return true;
- // void return: The RET node has the chain result value of the CALL node as
- // input.
- if (NumOps == 1 &&
- Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1))
- return true;
-
- return false;
-}