Diffstat (limited to 'contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp')
-rw-r--r-- | contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 563
1 file changed, 357 insertions, 206 deletions
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index c075da4..1d61657 100644 --- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "llvm/CodeGen/GCStrategy.h" +#include "llvm/CodeGen/SelectionDAG.h" #include "ScheduleDAGSDNodes.h" #include "SelectionDAGBuilder.h" #include "llvm/ADT/PostOrderIterator.h" @@ -21,10 +21,10 @@ #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/EHPersonalities.h" #include "llvm/Analysis/TargetLibraryInfo.h" -#include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/FastISel.h" #include "llvm/CodeGen/FunctionLoweringInfo.h" #include "llvm/CodeGen/GCMetadata.h" +#include "llvm/CodeGen/GCStrategy.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -32,8 +32,8 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/ScheduleHazardRecognizer.h" #include "llvm/CodeGen/SchedulerRegistry.h" -#include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/StackProtector.h" #include "llvm/CodeGen/WinEHFuncInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" @@ -59,6 +59,7 @@ #include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include <algorithm> + using namespace llvm; #define DEBUG_TYPE "isel" @@ -317,7 +318,7 @@ namespace llvm { "Unknown sched type!"); return createILPListDAGScheduler(IS, OptLevel); } -} +} // end namespace llvm // EmitInstrWithCustomInserter - This method should be implemented by targets // that mark instructions with the 'usesCustomInserter' flag. These @@ -329,7 +330,7 @@ namespace llvm { // are modified, the method should insert pairs of <OldSucc, NewSucc> into the // DenseMap. MachineBasicBlock * -TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, +TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const { #ifndef NDEBUG dbgs() << "If a target marks an instruction with " @@ -339,9 +340,9 @@ TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, llvm_unreachable(nullptr); } -void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, +void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const { - assert(!MI->hasPostISelHook() && + assert(!MI.hasPostISelHook() && "If a target marks an instruction with 'hasPostISelHook', " "it must implement TargetLowering::AdjustInstrPostInstrSelection!"); } @@ -376,6 +377,8 @@ SelectionDAGISel::~SelectionDAGISel() { void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<AAResultsWrapperPass>(); AU.addRequired<GCModuleInfo>(); + AU.addRequired<StackProtector>(); + AU.addPreserved<StackProtector>(); AU.addPreserved<GCModuleInfo>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); if (UseMBPI && OptLevel != CodeGenOpt::None) @@ -440,7 +443,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { TM.resetTargetOptions(Fn); // Reset OptLevel to None for optnone functions. 
CodeGenOpt::Level NewOptLevel = OptLevel; - if (Fn.hasFnAttribute(Attribute::OptimizeNone)) + if (OptLevel != CodeGenOpt::None && skipFunction(Fn)) NewOptLevel = CodeGenOpt::None; OptLevelChanger OLC(*this, NewOptLevel); @@ -468,11 +471,10 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { MF->setHasInlineAsm(false); FuncInfo->SplitCSR = false; - SmallVector<MachineBasicBlock*, 4> Returns; // We split CSR if the target supports it for the given function // and the function has only return exits. - if (TLI->supportSplitCSR(MF)) { + if (OptLevel != CodeGenOpt::None && TLI->supportSplitCSR(MF)) { FuncInfo->SplitCSR = true; // Collect all the return blocks. @@ -481,12 +483,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { continue; const TerminatorInst *Term = BB.getTerminator(); - if (isa<UnreachableInst>(Term)) + if (isa<UnreachableInst>(Term) || isa<ReturnInst>(Term)) continue; - if (isa<ReturnInst>(Term)) { - Returns.push_back(FuncInfo->MBBMap[&BB]); - continue; - } // Bail out if the exit block is not Return nor Unreachable. FuncInfo->SplitCSR = false; @@ -508,8 +506,21 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { RegInfo->EmitLiveInCopies(EntryMBB, TRI, *TII); // Insert copies in the entry block and the return blocks. - if (FuncInfo->SplitCSR) + if (FuncInfo->SplitCSR) { + SmallVector<MachineBasicBlock*, 4> Returns; + // Collect all the return blocks. + for (MachineBasicBlock &MBB : mf) { + if (!MBB.succ_empty()) + continue; + + MachineBasicBlock::iterator Term = MBB.getFirstTerminator(); + if (Term != MBB.end() && Term->isReturn()) { + Returns.push_back(&MBB); + continue; + } + } TLI->insertCopiesSplitCSR(EntryMBB, Returns); + } DenseMap<unsigned, unsigned> LiveInMap; if (!FuncInfo->ArgDbgValues.empty()) @@ -669,7 +680,7 @@ void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin, } void SelectionDAGISel::ComputeLiveOutVRegInfo() { - SmallPtrSet<SDNode*, 128> VisitedNodes; + SmallPtrSet<SDNode*, 16> VisitedNodes; SmallVector<SDNode*, 128> Worklist; Worklist.push_back(CurDAG->getRoot().getNode()); @@ -854,7 +865,8 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { Scheduler->Run(CurDAG, FuncInfo->MBB); } - if (ViewSUnitDAGs && MatchFilterBB) Scheduler->viewGraph(); + if (ViewSUnitDAGs && MatchFilterBB) + Scheduler->viewGraph(); // Emit machine code to BB. This can change 'BB' to the last block being // inserted into. @@ -937,23 +949,7 @@ void SelectionDAGISel::DoInstructionSelection() { if (Node->use_empty()) continue; - SDNode *ResNode = Select(Node); - - // FIXME: This is pretty gross. 'Select' should be changed to not return - // anything at all and this code should be nuked with a tactical strike. - - // If node should not be replaced, continue with the next one. - if (ResNode == Node || Node->getOpcode() == ISD::DELETED_NODE) - continue; - // Replace node. - if (ResNode) { - ReplaceUses(Node, ResNode); - } - - // If after the replacement this node is not used any more, - // remove this dead node. - if (Node->use_empty()) // Don't delete EntryToken, etc. - CurDAG->RemoveDeadNode(Node); + Select(Node); } CurDAG->setRoot(Dummy.getValue()); @@ -1147,7 +1143,125 @@ static void collectFailStats(const Instruction *I) { case Instruction::LandingPad: NumFastIselFailLandingPad++; return; } } -#endif +#endif // NDEBUG + +/// Set up SwiftErrorVals by going through the function. If the function has +/// swifterror argument, it will be the first entry. 
+static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI, + FunctionLoweringInfo *FuncInfo) { + if (!TLI->supportSwiftError()) + return; + + FuncInfo->SwiftErrorVals.clear(); + FuncInfo->SwiftErrorMap.clear(); + FuncInfo->SwiftErrorWorklist.clear(); + + // Check if function has a swifterror argument. + for (Function::const_arg_iterator AI = Fn.arg_begin(), AE = Fn.arg_end(); + AI != AE; ++AI) + if (AI->hasSwiftErrorAttr()) + FuncInfo->SwiftErrorVals.push_back(&*AI); + + for (const auto &LLVMBB : Fn) + for (const auto &Inst : LLVMBB) { + if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst)) + if (Alloca->isSwiftError()) + FuncInfo->SwiftErrorVals.push_back(Alloca); + } +} + +/// For each basic block, merge incoming swifterror values or simply propagate +/// them. The merged results will be saved in SwiftErrorMap. For predecessors +/// that are not yet visited, we create virtual registers to hold the swifterror +/// values and save them in SwiftErrorWorklist. +static void mergeIncomingSwiftErrors(FunctionLoweringInfo *FuncInfo, + const TargetLowering *TLI, + const TargetInstrInfo *TII, + const BasicBlock *LLVMBB, + SelectionDAGBuilder *SDB) { + if (!TLI->supportSwiftError()) + return; + + // We should only do this when we have swifterror parameter or swifterror + // alloc. + if (FuncInfo->SwiftErrorVals.empty()) + return; + + // At beginning of a basic block, insert PHI nodes or get the virtual + // register from the only predecessor, and update SwiftErrorMap; if one + // of the predecessors is not visited, update SwiftErrorWorklist. + // At end of a basic block, if a block is in SwiftErrorWorklist, insert copy + // to sync up the virtual register assignment. + + // Always create a virtual register for each swifterror value in entry block. + auto &DL = SDB->DAG.getDataLayout(); + const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL)); + if (pred_begin(LLVMBB) == pred_end(LLVMBB)) { + for (unsigned I = 0, E = FuncInfo->SwiftErrorVals.size(); I < E; I++) { + unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC); + // Assign Undef to Vreg. We construct MI directly to make sure it works + // with FastISel. + BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), + TII->get(TargetOpcode::IMPLICIT_DEF), VReg); + FuncInfo->SwiftErrorMap[FuncInfo->MBB].push_back(VReg); + } + return; + } + + if (auto *UniquePred = LLVMBB->getUniquePredecessor()) { + auto *UniquePredMBB = FuncInfo->MBBMap[UniquePred]; + if (!FuncInfo->SwiftErrorMap.count(UniquePredMBB)) { + // Update SwiftErrorWorklist with a new virtual register. + for (unsigned I = 0, E = FuncInfo->SwiftErrorVals.size(); I < E; I++) { + unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC); + FuncInfo->SwiftErrorWorklist[UniquePredMBB].push_back(VReg); + // Propagate the information from the single predecessor. + FuncInfo->SwiftErrorMap[FuncInfo->MBB].push_back(VReg); + } + return; + } + // Propagate the information from the single predecessor. + FuncInfo->SwiftErrorMap[FuncInfo->MBB] = + FuncInfo->SwiftErrorMap[UniquePredMBB]; + return; + } + + // For the case of multiple predecessors, update SwiftErrorWorklist. + // Handle the case where we have two or more predecessors being the same. 
+ for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB); + PI != PE; ++PI) { + auto *PredMBB = FuncInfo->MBBMap[*PI]; + if (!FuncInfo->SwiftErrorMap.count(PredMBB) && + !FuncInfo->SwiftErrorWorklist.count(PredMBB)) { + for (unsigned I = 0, E = FuncInfo->SwiftErrorVals.size(); I < E; I++) { + unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC); + // When we actually visit the basic block PredMBB, we will materialize + // the virtual register assignment in copySwiftErrorsToFinalVRegs. + FuncInfo->SwiftErrorWorklist[PredMBB].push_back(VReg); + } + } + } + + // For the case of multiple predecessors, create a virtual register for + // each swifterror value and generate Phi node. + for (unsigned I = 0, E = FuncInfo->SwiftErrorVals.size(); I < E; I++) { + unsigned VReg = FuncInfo->MF->getRegInfo().createVirtualRegister(RC); + FuncInfo->SwiftErrorMap[FuncInfo->MBB].push_back(VReg); + + MachineInstrBuilder SwiftErrorPHI = BuildMI(*FuncInfo->MBB, + FuncInfo->MBB->begin(), SDB->getCurDebugLoc(), + TII->get(TargetOpcode::PHI), VReg); + for (const_pred_iterator PI = pred_begin(LLVMBB), PE = pred_end(LLVMBB); + PI != PE; ++PI) { + auto *PredMBB = FuncInfo->MBBMap[*PI]; + unsigned SwiftErrorReg = FuncInfo->SwiftErrorMap.count(PredMBB) ? + FuncInfo->SwiftErrorMap[PredMBB][I] : + FuncInfo->SwiftErrorWorklist[PredMBB][I]; + SwiftErrorPHI.addReg(SwiftErrorReg) + .addMBB(PredMBB); + } + } +} void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { // Initialize the Fast-ISel state, if needed. @@ -1155,6 +1269,8 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { if (TM.Options.EnableFastISel) FastIS = TLI->createFastISel(*FuncInfo, LibInfo); + setupSwiftErrorVals(Fn, TLI, FuncInfo); + // Iterate over all basic blocks in the function. ReversePostOrderTraversal<const Function*> RPOT(&Fn); for (ReversePostOrderTraversal<const Function*>::rpo_iterator @@ -1193,6 +1309,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { if (!FuncInfo->MBB) continue; // Some blocks like catchpads have no code or MBB. FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI(); + mergeIncomingSwiftErrors(FuncInfo, TLI, TII, LLVMBB, SDB); // Setup an EH landing-pad block. FuncInfo->ExceptionPointerVirtReg = 0; @@ -1228,7 +1345,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { // where they are, so we can be sure to emit subsequent instructions // after them. if (FuncInfo->InsertPt != FuncInfo->MBB->begin()) - FastIS->setLastLocalValue(std::prev(FuncInfo->InsertPt)); + FastIS->setLastLocalValue(&*std::prev(FuncInfo->InsertPt)); else FastIS->setLastLocalValue(nullptr); } @@ -1345,6 +1462,12 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { LowerArguments(Fn); } } + if (getAnalysis<StackProtector>().shouldEmitSDCheck(*LLVMBB)) { + bool FunctionBasedInstrumentation = + TLI->getSSPStackGuardCheck(*Fn.getParent()); + SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB], + FunctionBasedInstrumentation); + } if (Begin != BI) ++NumDAGBlocks; @@ -1376,15 +1499,15 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { /// terminator instructors so we can satisfy ABI constraints. A partial /// terminator sequence is an improper subset of a terminator sequence (i.e. it /// may be the whole terminator sequence). 
-static bool MIIsInTerminatorSequence(const MachineInstr *MI) { +static bool MIIsInTerminatorSequence(const MachineInstr &MI) { // If we do not have a copy or an implicit def, we return true if and only if // MI is a debug value. - if (!MI->isCopy() && !MI->isImplicitDef()) + if (!MI.isCopy() && !MI.isImplicitDef()) // Sometimes DBG_VALUE MI sneak in between the copies from the vregs to the // physical registers if there is debug info associated with the terminator // of our mbb. We want to include said debug info in our terminator // sequence, so we return true in that case. - return MI->isDebugValue(); + return MI.isDebugValue(); // We have left the terminator sequence if we are not doing one of the // following: @@ -1394,18 +1517,18 @@ static bool MIIsInTerminatorSequence(const MachineInstr *MI) { // 3. Defining a register via an implicit def. // OPI should always be a register definition... - MachineInstr::const_mop_iterator OPI = MI->operands_begin(); + MachineInstr::const_mop_iterator OPI = MI.operands_begin(); if (!OPI->isReg() || !OPI->isDef()) return false; // Defining any register via an implicit def is always ok. - if (MI->isImplicitDef()) + if (MI.isImplicitDef()) return true; // Grab the copy source... MachineInstr::const_mop_iterator OPI2 = OPI; ++OPI2; - assert(OPI2 != MI->operands_end() + assert(OPI2 != MI.operands_end() && "Should have a copy implying we should have 2 arguments."); // Make sure that the copy dest is not a vreg when the copy source is a @@ -1432,7 +1555,7 @@ static bool MIIsInTerminatorSequence(const MachineInstr *MI) { /// terminator, but additionally the copies that move the vregs into the /// physical registers. static MachineBasicBlock::iterator -FindSplitPointForStackProtector(MachineBasicBlock *BB, DebugLoc DL) { +FindSplitPointForStackProtector(MachineBasicBlock *BB) { MachineBasicBlock::iterator SplitPoint = BB->getFirstTerminator(); // if (SplitPoint == BB->begin()) @@ -1442,7 +1565,7 @@ FindSplitPointForStackProtector(MachineBasicBlock *BB, DebugLoc DL) { MachineBasicBlock::iterator Previous = SplitPoint; --Previous; - while (MIIsInTerminatorSequence(Previous)) { + while (MIIsInTerminatorSequence(*Previous)) { SplitPoint = Previous; if (Previous == Start) break; @@ -1454,7 +1577,6 @@ FindSplitPointForStackProtector(MachineBasicBlock *BB, DebugLoc DL) { void SelectionDAGISel::FinishBasicBlock() { - DEBUG(dbgs() << "Total amount of phi nodes to update: " << FuncInfo->PHINodesToUpdate.size() << "\n"; for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) @@ -1474,7 +1596,23 @@ SelectionDAGISel::FinishBasicBlock() { } // Handle stack protector. - if (SDB->SPDescriptor.shouldEmitStackProtector()) { + if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) { + // The target provides a guard check function. There is no need to + // generate error handling code or to split current basic block. + MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB(); + + // Add load and check to the basicblock. + FuncInfo->MBB = ParentMBB; + FuncInfo->InsertPt = + FindSplitPointForStackProtector(ParentMBB); + SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB); + CurDAG->setRoot(SDB->getRoot()); + SDB->clear(); + CodeGenAndEmitDAG(); + + // Clear the Per-BB State. 
+ SDB->SPDescriptor.resetPerBBState(); + } else if (SDB->SPDescriptor.shouldEmitStackProtector()) { MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB(); MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB(); @@ -1485,7 +1623,7 @@ SelectionDAGISel::FinishBasicBlock() { // register allocation issues caused by us splitting the parent mbb. The // register allocator will clean up said virtual copies later on. MachineBasicBlock::iterator SplitPoint = - FindSplitPointForStackProtector(ParentMBB, SDB->getCurDebugLoc()); + FindSplitPointForStackProtector(ParentMBB); // Splice the terminator of ParentMBB into SuccessMBB. SuccessMBB->splice(SuccessMBB->end(), ParentMBB, @@ -1502,7 +1640,7 @@ SelectionDAGISel::FinishBasicBlock() { // CodeGen Failure MBB if we have not codegened it yet. MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB(); - if (!FailureMBB->size()) { + if (FailureMBB->empty()) { FuncInfo->MBB = FailureMBB; FuncInfo->InsertPt = FailureMBB->end(); SDB->visitSPDescriptorFailure(SDB->SPDescriptor); @@ -1515,52 +1653,61 @@ SelectionDAGISel::FinishBasicBlock() { SDB->SPDescriptor.resetPerBBState(); } - for (unsigned i = 0, e = SDB->BitTestCases.size(); i != e; ++i) { + // Lower each BitTestBlock. + for (auto &BTB : SDB->BitTestCases) { // Lower header first, if it wasn't already lowered - if (!SDB->BitTestCases[i].Emitted) { + if (!BTB.Emitted) { // Set the current basic block to the mbb we wish to insert the code into - FuncInfo->MBB = SDB->BitTestCases[i].Parent; + FuncInfo->MBB = BTB.Parent; FuncInfo->InsertPt = FuncInfo->MBB->end(); // Emit the code - SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB); + SDB->visitBitTestHeader(BTB, FuncInfo->MBB); CurDAG->setRoot(SDB->getRoot()); SDB->clear(); CodeGenAndEmitDAG(); } - BranchProbability UnhandledProb = SDB->BitTestCases[i].Prob; - for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) { - UnhandledProb -= SDB->BitTestCases[i].Cases[j].ExtraProb; + BranchProbability UnhandledProb = BTB.Prob; + for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) { + UnhandledProb -= BTB.Cases[j].ExtraProb; // Set the current basic block to the mbb we wish to insert the code into - FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB; + FuncInfo->MBB = BTB.Cases[j].ThisBB; FuncInfo->InsertPt = FuncInfo->MBB->end(); // Emit the code // If all cases cover a contiguous range, it is not necessary to jump to // the default block after the last bit test fails. This is because the // range check during bit test header creation has guaranteed that every - // case here doesn't go outside the range. + // case here doesn't go outside the range. In this case, there is no need + // to perform the last bit test, as it will always be true. Instead, make + // the second-to-last bit-test fall through to the target of the last bit + // test, and delete the last bit test. + MachineBasicBlock *NextMBB; - if (SDB->BitTestCases[i].ContiguousRange && j + 2 == ej) - NextMBB = SDB->BitTestCases[i].Cases[j + 1].TargetBB; - else if (j + 1 != ej) - NextMBB = SDB->BitTestCases[i].Cases[j + 1].ThisBB; - else - NextMBB = SDB->BitTestCases[i].Default; + if (BTB.ContiguousRange && j + 2 == ej) { + // Second-to-last bit-test with contiguous range: fall through to the + // target of the final bit test. + NextMBB = BTB.Cases[j + 1].TargetBB; + } else if (j + 1 == ej) { + // For the last bit test, fall through to Default. + NextMBB = BTB.Default; + } else { + // Otherwise, fall through to the next bit test. 
+ NextMBB = BTB.Cases[j + 1].ThisBB; + } - SDB->visitBitTestCase(SDB->BitTestCases[i], - NextMBB, - UnhandledProb, - SDB->BitTestCases[i].Reg, - SDB->BitTestCases[i].Cases[j], + SDB->visitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], FuncInfo->MBB); CurDAG->setRoot(SDB->getRoot()); SDB->clear(); CodeGenAndEmitDAG(); - if (SDB->BitTestCases[i].ContiguousRange && j + 2 == ej) + if (BTB.ContiguousRange && j + 2 == ej) { + // Since we're not going to use the final bit test, remove it. + BTB.Cases.pop_back(); break; + } } // Update PHI Nodes @@ -1571,16 +1718,18 @@ SelectionDAGISel::FinishBasicBlock() { assert(PHI->isPHI() && "This is not a machine PHI node that we are updating!"); // This is "default" BB. We have two jumps to it. From "header" BB and - // from last "case" BB. - if (PHIBB == SDB->BitTestCases[i].Default) - PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second) - .addMBB(SDB->BitTestCases[i].Parent) - .addReg(FuncInfo->PHINodesToUpdate[pi].second) - .addMBB(SDB->BitTestCases[i].Cases.back().ThisBB); + // from last "case" BB, unless the latter was skipped. + if (PHIBB == BTB.Default) { + PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(BTB.Parent); + if (!BTB.ContiguousRange) { + PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second) + .addMBB(BTB.Cases.back().ThisBB); + } + } // One of "cases" BB. - for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); + for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) { - MachineBasicBlock* cBB = SDB->BitTestCases[i].Cases[j].ThisBB; + MachineBasicBlock* cBB = BTB.Cases[j].ThisBB; if (cBB->isSuccessor(PHIBB)) PHI.addReg(FuncInfo->PHINodesToUpdate[pi].second).addMBB(cBB); } @@ -1685,7 +1834,6 @@ SelectionDAGISel::FinishBasicBlock() { SDB->SwitchCases.clear(); } - /// Create the scheduler. If a specific scheduler was specified /// via the SchedulerRegistry, use it, otherwise select the /// one preferred by the target. @@ -1764,8 +1912,8 @@ bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS, /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated /// by tblgen. Others should not call it. -void SelectionDAGISel:: -SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, SDLoc DL) { +void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, + const SDLoc &DL) { std::vector<SDValue> InOps; std::swap(InOps, Ops); @@ -1802,15 +1950,15 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, SDLoc DL) { // Otherwise, this is a memory operand. Ask the target to select it. std::vector<SDValue> SelOps; - if (SelectInlineAsmMemoryOperand(InOps[i+1], - InlineAsm::getMemoryConstraintID(Flags), - SelOps)) + unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags); + if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps)) report_fatal_error("Could not match memory address. Inline asm" " failure!"); // Add this to the output node. 
unsigned NewFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size()); + NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID); Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32)); Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); i += 2; @@ -1956,7 +2104,7 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root, return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains); } -SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) { +void SelectionDAGISel::Select_INLINEASM(SDNode *N) { SDLoc DL(N); std::vector<SDValue> Ops(N->op_begin(), N->op_end()); @@ -1965,11 +2113,11 @@ SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) { const EVT VTs[] = {MVT::Other, MVT::Glue}; SDValue New = CurDAG->getNode(ISD::INLINEASM, DL, VTs, Ops); New->setNodeId(-1); - return New.getNode(); + ReplaceUses(N, New.getNode()); + CurDAG->RemoveDeadNode(N); } -SDNode -*SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) { +void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) { SDLoc dl(Op); MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1)); const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0)); @@ -1979,11 +2127,11 @@ SDNode SDValue New = CurDAG->getCopyFromReg( Op->getOperand(0), dl, Reg, Op->getValueType(0)); New->setNodeId(-1); - return New.getNode(); + ReplaceUses(Op, New.getNode()); + CurDAG->RemoveDeadNode(Op); } -SDNode -*SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) { +void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) { SDLoc dl(Op); MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1)); const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0)); @@ -1993,13 +2141,12 @@ SDNode SDValue New = CurDAG->getCopyToReg( Op->getOperand(0), dl, Reg, Op->getOperand(2)); New->setNodeId(-1); - return New.getNode(); + ReplaceUses(Op, New.getNode()); + CurDAG->RemoveDeadNode(Op); } - - -SDNode *SelectionDAGISel::Select_UNDEF(SDNode *N) { - return CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF,N->getValueType(0)); +void SelectionDAGISel::Select_UNDEF(SDNode *N) { + CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0)); } /// GetVBR - decode a vbr encoding whose top bit is set. @@ -2019,15 +2166,11 @@ GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) { return Val; } - -/// UpdateChainsAndGlue - When a match is complete, this method updates uses of -/// interior glue and chain results to use the new glue and chain results. -void SelectionDAGISel:: -UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain, - const SmallVectorImpl<SDNode*> &ChainNodesMatched, - SDValue InputGlue, - const SmallVectorImpl<SDNode*> &GlueResultNodesMatched, - bool isMorphNodeTo) { +/// When a match is complete, this method updates uses of interior chain results +/// to use the new results. +void SelectionDAGISel::UpdateChains( + SDNode *NodeToMatch, SDValue InputChain, + const SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) { SmallVector<SDNode*, 4> NowDeadNodes; // Now that all the normal results are replaced, we replace the chain and @@ -2039,10 +2182,8 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain, // Replace all the chain results with the final chain we ended up with. for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) { SDNode *ChainNode = ChainNodesMatched[i]; - - // If this node was already deleted, don't look at it. 
- if (ChainNode->getOpcode() == ISD::DELETED_NODE) - continue; + assert(ChainNode->getOpcode() != ISD::DELETED_NODE && + "Deleted node left in chain"); // Don't replace the results of the root node if we're doing a // MorphNodeTo. @@ -2056,35 +2197,12 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain, CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain); // If the node became dead and we haven't already seen it, delete it. - if (ChainNode->use_empty() && + if (ChainNode != NodeToMatch && ChainNode->use_empty() && !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode)) NowDeadNodes.push_back(ChainNode); } } - // If the result produces glue, update any glue results in the matched - // pattern with the glue result. - if (InputGlue.getNode()) { - // Handle any interior nodes explicitly marked. - for (unsigned i = 0, e = GlueResultNodesMatched.size(); i != e; ++i) { - SDNode *FRN = GlueResultNodesMatched[i]; - - // If this node was already deleted, don't look at it. - if (FRN->getOpcode() == ISD::DELETED_NODE) - continue; - - assert(FRN->getValueType(FRN->getNumValues()-1) == MVT::Glue && - "Doesn't have a glue result"); - CurDAG->ReplaceAllUsesOfValueWith(SDValue(FRN, FRN->getNumValues()-1), - InputGlue); - - // If the node became dead and we haven't already seen it, delete it. - if (FRN->use_empty() && - !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), FRN)) - NowDeadNodes.push_back(FRN); - } - } - if (!NowDeadNodes.empty()) CurDAG->RemoveDeadNodes(NowDeadNodes); @@ -2108,8 +2226,9 @@ enum ChainResult { /// already selected nodes "below" us. static ChainResult WalkChainUsers(const SDNode *ChainedNode, - SmallVectorImpl<SDNode*> &ChainedNodesInPattern, - SmallVectorImpl<SDNode*> &InteriorChainedNodes) { + SmallVectorImpl<SDNode *> &ChainedNodesInPattern, + DenseMap<const SDNode *, ChainResult> &TokenFactorResult, + SmallVectorImpl<SDNode *> &InteriorChainedNodes) { ChainResult Result = CR_Simple; for (SDNode::use_iterator UI = ChainedNode->use_begin(), @@ -2190,7 +2309,15 @@ WalkChainUsers(const SDNode *ChainedNode, // as a new TokenFactor. // // To distinguish these two cases, do a recursive walk down the uses. - switch (WalkChainUsers(User, ChainedNodesInPattern, InteriorChainedNodes)) { + auto MemoizeResult = TokenFactorResult.find(User); + bool Visited = MemoizeResult != TokenFactorResult.end(); + // Recursively walk chain users only if the result is not memoized. + if (!Visited) { + auto Res = WalkChainUsers(User, ChainedNodesInPattern, TokenFactorResult, + InteriorChainedNodes); + MemoizeResult = TokenFactorResult.insert(std::make_pair(User, Res)).first; + } + switch (MemoizeResult->second) { case CR_Simple: // If the uses of the TokenFactor are just already-selected nodes, ignore // it, it is "below" our pattern. @@ -2210,9 +2337,10 @@ WalkChainUsers(const SDNode *ChainedNode, // ultimate chain result of the generated code. We will also add its chain // inputs as inputs to the ultimate TokenFactor we create. Result = CR_LeadsToInteriorNode; - ChainedNodesInPattern.push_back(User); - InteriorChainedNodes.push_back(User); - continue; + if (!Visited) { + ChainedNodesInPattern.push_back(User); + InteriorChainedNodes.push_back(User); + } } return Result; @@ -2227,12 +2355,16 @@ WalkChainUsers(const SDNode *ChainedNode, static SDValue HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched, SelectionDAG *CurDAG) { + // Used for memoization. Without it WalkChainUsers could take exponential + // time to run. 
+ DenseMap<const SDNode *, ChainResult> TokenFactorResult; // Walk all of the chained nodes we've matched, recursively scanning down the // users of the chain result. This adds any TokenFactor nodes that are caught // in between chained nodes to the chained and interior nodes list. SmallVector<SDNode*, 3> InteriorChainedNodes; for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) { if (WalkChainUsers(ChainNodesMatched[i], ChainNodesMatched, + TokenFactorResult, InteriorChainedNodes) == CR_InducesCycle) return SDValue(); // Would induce a cycle. } @@ -2322,8 +2454,10 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList, // Otherwise, no replacement happened because the node already exists. Replace // Uses of the old node with the new one. - if (Res != Node) + if (Res != Node) { CurDAG->ReplaceAllUsesWith(Node, Res); + CurDAG->RemoveDeadNode(Node); + } return Res; } @@ -2534,7 +2668,6 @@ static unsigned IsPredicateKnownToFail(const unsigned char *Table, } namespace { - struct MatchScope { /// FailIndex - If this match fails, this is the index to continue with. unsigned FailIndex; @@ -2552,7 +2685,7 @@ struct MatchScope { SDValue InputChain, InputGlue; /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty. - bool HasChainNodesMatched, HasGlueResultNodesMatched; + bool HasChainNodesMatched; }; /// \\brief A DAG update listener to keep the matching state @@ -2591,11 +2724,11 @@ public: J.setNode(E); } }; -} +} // end anonymous namespace -SDNode *SelectionDAGISel:: -SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, - unsigned TableSize) { +void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch, + const unsigned char *MatcherTable, + unsigned TableSize) { // FIXME: Should these even be selected? Handle these cases in the caller? switch (NodeToMatch->getOpcode()) { default: @@ -2623,16 +2756,25 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, case ISD::LIFETIME_START: case ISD::LIFETIME_END: NodeToMatch->setNodeId(-1); // Mark selected. - return nullptr; + return; case ISD::AssertSext: case ISD::AssertZext: CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, 0), NodeToMatch->getOperand(0)); - return nullptr; - case ISD::INLINEASM: return Select_INLINEASM(NodeToMatch); - case ISD::READ_REGISTER: return Select_READ_REGISTER(NodeToMatch); - case ISD::WRITE_REGISTER: return Select_WRITE_REGISTER(NodeToMatch); - case ISD::UNDEF: return Select_UNDEF(NodeToMatch); + CurDAG->RemoveDeadNode(NodeToMatch); + return; + case ISD::INLINEASM: + Select_INLINEASM(NodeToMatch); + return; + case ISD::READ_REGISTER: + Select_READ_REGISTER(NodeToMatch); + return; + case ISD::WRITE_REGISTER: + Select_WRITE_REGISTER(NodeToMatch); + return; + case ISD::UNDEF: + Select_UNDEF(NodeToMatch); + return; } assert(!NodeToMatch->isMachineOpcode() && "Node already selected!"); @@ -2665,7 +2807,6 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, // which ones they are. The result is captured into this list so that we can // update the chain results when the pattern is complete. 
SmallVector<SDNode*, 3> ChainNodesMatched; - SmallVector<SDNode*, 3> GlueResultNodesMatched; DEBUG(dbgs() << "ISEL: Starting pattern match on root node: "; NodeToMatch->dump(CurDAG); @@ -2771,7 +2912,6 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, NewEntry.InputChain = InputChain; NewEntry.InputGlue = InputGlue; NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty(); - NewEntry.HasGlueResultNodesMatched = !GlueResultNodesMatched.empty(); MatchScopes.push_back(NewEntry); continue; } @@ -2816,6 +2956,18 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, continue; } + case OPC_MoveChild0: case OPC_MoveChild1: + case OPC_MoveChild2: case OPC_MoveChild3: + case OPC_MoveChild4: case OPC_MoveChild5: + case OPC_MoveChild6: case OPC_MoveChild7: { + unsigned ChildNo = Opcode-OPC_MoveChild0; + if (ChildNo >= N.getNumOperands()) + break; // Match fails if out of range child #. + N = N.getOperand(ChildNo); + NodeStack.push_back(N); + continue; + } + case OPC_MoveParent: // Pop the current node off the NodeStack. NodeStack.pop_back(); @@ -3028,12 +3180,12 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, if (Imm->getOpcode() == ISD::Constant) { const ConstantInt *Val=cast<ConstantSDNode>(Imm)->getConstantIntValue(); - Imm = CurDAG->getConstant(*Val, SDLoc(NodeToMatch), Imm.getValueType(), - true); + Imm = CurDAG->getTargetConstant(*Val, SDLoc(NodeToMatch), + Imm.getValueType()); } else if (Imm->getOpcode() == ISD::ConstantFP) { const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue(); - Imm = CurDAG->getConstantFP(*Val, SDLoc(NodeToMatch), - Imm.getValueType(), true); + Imm = CurDAG->getTargetConstantFP(*Val, SDLoc(NodeToMatch), + Imm.getValueType()); } RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second)); @@ -3041,7 +3193,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, } case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0 - case OPC_EmitMergeInputChains1_1: { // OPC_EmitMergeInputChains, 1, 1 + case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1 + case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2 // These are space-optimized forms of OPC_EmitMergeInputChains. assert(!InputChain.getNode() && "EmitMergeInputChains should be the first chain producing node"); @@ -3049,7 +3202,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, "Should only have one EmitMergeInputChains per match"); // Read all of the chained nodes. - unsigned RecNo = Opcode == OPC_EmitMergeInputChains1_1; + unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0; assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains"); ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode()); @@ -3137,13 +3290,22 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, continue; } - case OPC_EmitNode: - case OPC_MorphNodeTo: { + case OPC_EmitNode: case OPC_MorphNodeTo: + case OPC_EmitNode0: case OPC_EmitNode1: case OPC_EmitNode2: + case OPC_MorphNodeTo0: case OPC_MorphNodeTo1: case OPC_MorphNodeTo2: { uint16_t TargetOpc = MatcherTable[MatcherIndex++]; TargetOpc |= (unsigned short)MatcherTable[MatcherIndex++] << 8; unsigned EmitNodeInfo = MatcherTable[MatcherIndex++]; // Get the result VT list. - unsigned NumVTs = MatcherTable[MatcherIndex++]; + unsigned NumVTs; + // If this is one of the compressed forms, get the number of VTs based + // on the Opcode. 
Otherwise read the next byte from the table. + if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2) + NumVTs = Opcode - OPC_MorphNodeTo0; + else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2) + NumVTs = Opcode - OPC_EmitNode0; + else + NumVTs = MatcherTable[MatcherIndex++]; SmallVector<EVT, 4> VTs; for (unsigned i = 0; i != NumVTs; ++i) { MVT::SimpleValueType VT = @@ -3205,7 +3367,9 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, // Create the node. SDNode *Res = nullptr; - if (Opcode != OPC_MorphNodeTo) { + bool IsMorphNodeTo = Opcode == OPC_MorphNodeTo || + (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2); + if (!IsMorphNodeTo) { // If this is a normal EmitNode command, just create the new node and // add the results to the RecordedNodes list. Res = CurDAG->getMachineNode(TargetOpc, SDLoc(NodeToMatch), @@ -3218,13 +3382,17 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, nullptr)); } - } else if (NodeToMatch->getOpcode() != ISD::DELETED_NODE) { - Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops, EmitNodeInfo); } else { - // NodeToMatch was eliminated by CSE when the target changed the DAG. - // We will visit the equivalent node later. - DEBUG(dbgs() << "Node was eliminated by CSE\n"); - return nullptr; + assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE && + "NodeToMatch was removed partway through selection"); + SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N, + SDNode *E) { + auto &Chain = ChainNodesMatched; + assert((!E || llvm::find(Chain, N) == Chain.end()) && + "Chain node replaced during MorphNode"); + Chain.erase(std::remove(Chain.begin(), Chain.end(), N), Chain.end()); + }); + Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops, EmitNodeInfo); } // If the node had chain/glue results, update our notion of the current @@ -3285,31 +3453,14 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, } DEBUG(dbgs() << " " - << (Opcode == OPC_MorphNodeTo ? "Morphed" : "Created") + << (IsMorphNodeTo ? "Morphed" : "Created") << " node: "; Res->dump(CurDAG); dbgs() << "\n"); // If this was a MorphNodeTo then we're completely done! - if (Opcode == OPC_MorphNodeTo) { - // Update chain and glue uses. - UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched, - InputGlue, GlueResultNodesMatched, true); - return Res; - } - - continue; - } - - case OPC_MarkGlueResults: { - unsigned NumNodes = MatcherTable[MatcherIndex++]; - - // Read and remember all the glue-result nodes. - for (unsigned i = 0; i != NumNodes; ++i) { - unsigned RecNo = MatcherTable[MatcherIndex++]; - if (RecNo & 128) - RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex); - - assert(RecNo < RecordedNodes.size() && "Invalid MarkGlueResults"); - GlueResultNodesMatched.push_back(RecordedNodes[RecNo].first.getNode()); + if (IsMorphNodeTo) { + // Update chain uses. + UpdateChains(Res, InputChain, ChainNodesMatched, true); + return; } continue; } @@ -3341,20 +3492,24 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, CurDAG->ReplaceAllUsesOfValueWith(SDValue(NodeToMatch, i), Res); } - // If the root node defines glue, add it to the glue nodes to update list. - if (NodeToMatch->getValueType(NodeToMatch->getNumValues()-1) == MVT::Glue) - GlueResultNodesMatched.push_back(NodeToMatch); + // Update chain uses. + UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, false); - // Update chain and glue uses. 
- UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched, - InputGlue, GlueResultNodesMatched, false); + // If the root node defines glue, we need to update it to the glue result. + // TODO: This never happens in our tests and I think it can be removed / + // replaced with an assert, but if we do it this the way the change is + // NFC. + if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) == + MVT::Glue && + InputGlue.getNode()) + CurDAG->ReplaceAllUsesOfValueWith( + SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1), InputGlue); assert(NodeToMatch->use_empty() && "Didn't replace all uses of the node?"); + CurDAG->RemoveDeadNode(NodeToMatch); - // FIXME: We just return here, which interacts correctly with SelectRoot - // above. We should fix this to not return an SDNode* anymore. - return nullptr; + return; } } @@ -3366,7 +3521,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, while (1) { if (MatchScopes.empty()) { CannotYetSelect(NodeToMatch); - return nullptr; + return; } // Restore the interpreter state back to the point where the scope was @@ -3387,8 +3542,6 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, InputGlue = LastScope.InputGlue; if (!LastScope.HasChainNodesMatched) ChainNodesMatched.clear(); - if (!LastScope.HasGlueResultNodesMatched) - GlueResultNodesMatched.clear(); // Check to see what the offset is at the new MatcherIndex. If it is zero // we have reached the end of this scope, otherwise we have another child @@ -3411,8 +3564,6 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, } } - - void SelectionDAGISel::CannotYetSelect(SDNode *N) { std::string msg; raw_string_ostream Msg(msg); |
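
The central interface change in this revision is that Select() and the helper routines (Select_INLINEASM, Select_READ_REGISTER, Select_WRITE_REGISTER, Select_UNDEF, SelectCodeCommon) now return void: each routine replaces or morphs the node itself via ReplaceUses, SelectNodeTo, or RemoveDeadNode, and DoInstructionSelection no longer post-processes a returned SDNode*. Glue bookkeeping (OPC_MarkGlueResults, GlueResultNodesMatched) is dropped, with chain fixups handled by the slimmer UpdateChains. Below is a minimal sketch, not part of the patch, of what a backend's Select override looks like under the new contract; FooDAGToDAGISel, Foo::ADDri, and the trivial frame-index pattern are made-up placeholders, and in a real target SelectCode comes from the TableGen-generated include.

// Minimal, hypothetical backend ISel illustrating the void-returning Select
// contract introduced by this patch. Names prefixed with "Foo" are placeholders.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
using namespace llvm;

namespace Foo {
// Stand-in for a target opcode; a real target gets this from FooGenInstrInfo.inc.
enum { ADDri = 1 };
} // end namespace Foo

namespace {
class FooDAGToDAGISel : public SelectionDAGISel {
public:
  explicit FooDAGToDAGISel(TargetMachine &TM) : SelectionDAGISel(TM) {}

  void Select(SDNode *N) override {
    SDLoc DL(N);

    // Nodes that already carry a machine opcode were selected elsewhere.
    if (N->isMachineOpcode()) {
      N->setNodeId(-1);
      return;
    }

    switch (N->getOpcode()) {
    case ISD::FrameIndex: {
      // Fold a frame index into an add-immediate in place. SelectNodeTo
      // mutates N, so nothing is returned and nothing is left to delete.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      CurDAG->SelectNodeTo(N, Foo::ADDri, N->getValueType(0), TFI, Zero);
      return;
    }
    default:
      break;
    }

    // Everything else goes to the TableGen-generated matcher, which calls the
    // void SelectCodeCommon above and performs ReplaceUses / RemoveDeadNode
    // itself.
    SelectCode(N);
  }

private:
  // Provided by the generated matcher include in a real target; declared
  // here only so the sketch reads as self-contained.
  void SelectCode(SDNode *N);
};
} // end anonymous namespace

Returning void removes the ambiguity of the old API (return the same node, return nullptr, or return a replacement) and lets each pattern decide locally whether to mutate the node in place or build a fresh node and delete the old one.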