Diffstat (limited to 'lib/Target/X86/X86InstrInfo.cpp')
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp | 60
1 file changed, 10 insertions(+), 50 deletions(-)
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 63e78de..246804e 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -23,7 +23,6 @@
 #include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/StackMaps.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -4453,7 +4452,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     // such as TF/IF/DF, which LLVM doesn't model.
     //
     // Notice that we have to adjust the stack if we don't want to clobber the
-    // first frame index. See X86FrameLowering.cpp - usesTheStack.
+    // first frame index.
+    // See X86ISelLowering.cpp - X86::hasCopyImplyingStackAdjustment.
 
     bool AXDead = (Reg == AX) ||
@@ -4465,6 +4465,10 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
       // (unnecessarily) saving+restoring a dead register. However the
       // MachineVerifier expects operands that read from dead registers
       // to be marked with the "undef" flag.
+      // An example of this can be found in
+      // test/CodeGen/X86/peephole-na-phys-copy-folding.ll and
+      // test/CodeGen/X86/cmpxchg-clobber-flags.ll when using
+      // -verify-machineinstrs.
       BuildMI(MBB, MI, DL, get(Push)).addReg(AX, getKillRegState(true));
     }
     if (FromEFLAGS) {
@@ -5309,50 +5313,6 @@ static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
   return true;
 }
 
-bool X86InstrInfo::ExpandMOVImmSExti8(MachineInstrBuilder &MIB) const {
-  MachineBasicBlock &MBB = *MIB->getParent();
-  DebugLoc DL = MIB->getDebugLoc();
-  int64_t Imm = MIB->getOperand(1).getImm();
-  assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
-  MachineBasicBlock::iterator I = MIB.getInstr();
-
-  int StackAdjustment;
-
-  if (Subtarget.is64Bit()) {
-    assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
-           MIB->getOpcode() == X86::MOV32ImmSExti8);
-    // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
-    // widen the register if necessary.
-    StackAdjustment = 8;
-    BuildMI(MBB, I, DL, get(X86::PUSH64i8)).addImm(Imm);
-    MIB->setDesc(get(X86::POP64r));
-    MIB->getOperand(0)
-        .setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64));
-  } else {
-    assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
-    StackAdjustment = 4;
-    BuildMI(MBB, I, DL, get(X86::PUSH32i8)).addImm(Imm);
-    MIB->setDesc(get(X86::POP32r));
-  }
-
-  // Build CFI if necessary.
-  MachineFunction &MF = *MBB.getParent();
-  const X86FrameLowering *TFL = Subtarget.getFrameLowering();
-  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
-  bool NeedsDwarfCFI =
-      !IsWin64Prologue &&
-      (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry());
-  bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
-  if (EmitCFI) {
-    TFL->BuildCFI(MBB, I, DL,
-        MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
-    TFL->BuildCFI(MBB, std::next(I), DL,
-        MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
-  }
-
-  return true;
-}
-
 // LoadStackGuard has so far only been implemented for 64-bit MachO. Different
 // code sequence is needed for other targets.
 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
@@ -5385,9 +5345,6 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
     return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
   case X86::MOV32r_1:
     return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
-  case X86::MOV32ImmSExti8:
-  case X86::MOV64ImmSExti8:
-    return ExpandMOVImmSExti8(MIB);
   case X86::SETB_C8r:
     return Expand2AddrUndef(MIB, get(X86::SBB8rr));
   case X86::SETB_C16r:
@@ -5412,7 +5369,10 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
   case X86::TEST8ri_NOREX:
     MI->setDesc(get(X86::TEST8ri));
     return true;
-
+  case X86::MOV32ri64:
+    MI->setDesc(get(X86::MOV32ri));
+    return true;
+
   // KNL does not recognize dependency-breaking idioms for mask registers,
   // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
   // Using %k0 as the undef input register is a performance heuristic based
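
Note on the new MOV32ri64 case in expandPostRAPseudo: the pseudo materializes a 64-bit value whose immediate fits in 32 bits, and re-tagging it as a plain MOV32ri is enough because in 64-bit mode a write to a 32-bit GPR implicitly zeroes the upper 32 bits of the full register. A minimal standalone sketch of that architectural fact (not part of this patch; it assumes an x86-64 host and GCC/Clang extended inline asm):

// Demonstrates the implicit zero-extension that the MOV32ri64 -> MOV32ri
// expansion above relies on. Illustrative only.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t r = 0xffffffffffffffffULL;   // poison all 64 bits
  // "%k0" selects the 32-bit subregister of whatever register holds r;
  // the 32-bit movl clears bits 63:32 as a side effect of the write.
  asm volatile("movl $42, %k0" : "+r"(r));
  std::printf("0x%016llx\n", static_cast<unsigned long long>(r));
  // Prints 0x000000000000002a: the upper half is zeroed, so a 32-bit
  // immediate move suffices to materialize a 64-bit value that fits in
  // 32 bits.
  return 0;
}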