Diffstat (limited to 'contrib/llvm/lib/Target/R600/SIInstrInfo.cpp')
-rw-r--r--  contrib/llvm/lib/Target/R600/SIInstrInfo.cpp | 299
1 file changed, 231 insertions(+), 68 deletions(-)
diff --git a/contrib/llvm/lib/Target/R600/SIInstrInfo.cpp b/contrib/llvm/lib/Target/R600/SIInstrInfo.cpp
index 5ab33b4..d647c25 100644
--- a/contrib/llvm/lib/Target/R600/SIInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/R600/SIInstrInfo.cpp
@@ -28,8 +28,7 @@ using namespace llvm;
 
 SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
-  : AMDGPUInstrInfo(st),
-    RI(st) { }
+    : AMDGPUInstrInfo(st), RI() {}
 
 //===----------------------------------------------------------------------===//
 // TargetInstrInfo callbacks
@@ -75,6 +74,20 @@ static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
   return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
 }
 
+bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
+                                                    AliasAnalysis *AA) const {
+  // TODO: The generic check fails for VALU instructions that should be
+  // rematerializable due to implicit reads of exec. We really want all of the
+  // generic logic for this except for this.
+  switch (MI->getOpcode()) {
+  case AMDGPU::V_MOV_B32_e32:
+  case AMDGPU::V_MOV_B32_e64:
+    return true;
+  default:
+    return false;
+  }
+}
+
 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                           int64_t &Offset0,
                                           int64_t &Offset1) const {
@@ -427,7 +440,9 @@ SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   }
 }
 
-unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
+unsigned SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
+  const unsigned Opcode = MI.getOpcode();
+
   int NewOpc;
 
   // Try to map original to commuted opcode
@@ -566,7 +581,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
                                                unsigned Size) const {
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
-  const AMDGPUSubtarget &ST = MF->getTarget().getSubtarget<AMDGPUSubtarget>();
+  const AMDGPUSubtarget &ST = MF->getSubtarget<AMDGPUSubtarget>();
   const SIRegisterInfo *TRI =
       static_cast<const SIRegisterInfo*>(ST.getRegisterInfo());
   DebugLoc DL = MBB.findDebugLoc(MI);
@@ -592,10 +607,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
     unsigned TIDIGZReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_Z);
     unsigned InputPtrReg =
         TRI->getPreloadedValue(*MF, SIRegisterInfo::INPUT_PTR);
-    static const unsigned TIDIGRegs[3] = {
-      TIDIGXReg, TIDIGYReg, TIDIGZReg
-    };
-    for (unsigned Reg : TIDIGRegs) {
+    for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
       if (!Entry.isLiveIn(Reg))
         Entry.addLiveIn(Reg);
     }
@@ -729,6 +741,26 @@ bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
     MI->eraseFromParent();
     break;
   }
+
+  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
+    unsigned Dst = MI->getOperand(0).getReg();
+    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
+    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
+    unsigned Src0 = MI->getOperand(1).getReg();
+    unsigned Src1 = MI->getOperand(2).getReg();
+    const MachineOperand &SrcCond = MI->getOperand(3);
+
+    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
+        .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
+        .addReg(RI.getSubReg(Src1, AMDGPU::sub0))
+        .addOperand(SrcCond);
+    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
+        .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
+        .addReg(RI.getSubReg(Src1, AMDGPU::sub1))
+        .addOperand(SrcCond);
+    MI->eraseFromParent();
+    break;
+  }
   }
   return true;
 }
@@ -759,7 +791,7 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
       (!isOperandLegal(MI, Src0Idx, &Src1) ||
       !isOperandLegal(MI, Src1Idx, &Src0))) {
     return nullptr;
-  }
+  }
 
   if (!Src1.isReg()) {
     // Allow commuting instructions with Imm operands.
@@ -801,7 +833,7 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
   }
 
   if (MI)
-    MI->setDesc(get(commuteOpcode(MI->getOpcode())));
+    MI->setDesc(get(commuteOpcode(*MI)));
 
   return MI;
 }
@@ -868,6 +900,134 @@ SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
   return RC != &AMDGPU::EXECRegRegClass;
 }
 
+static void removeModOperands(MachineInstr &MI) {
+  unsigned Opc = MI.getOpcode();
+  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
+                                              AMDGPU::OpName::src0_modifiers);
+  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
+                                              AMDGPU::OpName::src1_modifiers);
+  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
+                                              AMDGPU::OpName::src2_modifiers);
+
+  MI.RemoveOperand(Src2ModIdx);
+  MI.RemoveOperand(Src1ModIdx);
+  MI.RemoveOperand(Src0ModIdx);
+}
+
+bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+                                unsigned Reg, MachineRegisterInfo *MRI) const {
+  if (!MRI->hasOneNonDBGUse(Reg))
+    return false;
+
+  unsigned Opc = UseMI->getOpcode();
+  if (Opc == AMDGPU::V_MAD_F32) {
+    // Don't fold if we are using source modifiers. The new VOP2 instructions
+    // don't have them.
+    if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
+        hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
+        hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
+      return false;
+    }
+
+    MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
+    MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
+    MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);
+
+    // Multiplied part is the constant: Use v_madmk_f32
+    // We should only expect these to be on src0 due to canonicalizations.
+    if (Src0->isReg() && Src0->getReg() == Reg) {
+      if (!Src1->isReg() ||
+          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
+        return false;
+
+      if (!Src2->isReg() ||
+          (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))))
+        return false;
+
+      // We need to do some weird looking operand shuffling since the madmk
+      // operands are out of the normal expected order with the multiplied
+      // constant as the last operand.
+      //
+      // v_mad_f32 src0, src1, src2 -> v_madmk_f32 src0 * src2K + src1
+      // src0 -> src2 K
+      // src1 -> src0
+      // src2 -> src1
+
+      const int64_t Imm = DefMI->getOperand(1).getImm();
+
+      // FIXME: This would be a lot easier if we could return a new instruction
+      // instead of having to modify in place.
+
+      // Remove these first since they are at the end.
+      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
+                                                      AMDGPU::OpName::omod));
+      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
+                                                      AMDGPU::OpName::clamp));
+
+      unsigned Src1Reg = Src1->getReg();
+      unsigned Src1SubReg = Src1->getSubReg();
+      unsigned Src2Reg = Src2->getReg();
+      unsigned Src2SubReg = Src2->getSubReg();
+      Src0->setReg(Src1Reg);
+      Src0->setSubReg(Src1SubReg);
+      Src0->setIsKill(Src1->isKill());
+
+      Src1->setReg(Src2Reg);
+      Src1->setSubReg(Src2SubReg);
+      Src1->setIsKill(Src2->isKill());
+
+      Src2->ChangeToImmediate(Imm);
+
+      removeModOperands(*UseMI);
+      UseMI->setDesc(get(AMDGPU::V_MADMK_F32));
+
+      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
+      if (DeleteDef)
+        DefMI->eraseFromParent();
+
+      return true;
+    }
+
+    // Added part is the constant: Use v_madak_f32
+    if (Src2->isReg() && Src2->getReg() == Reg) {
+      // Not allowed to use constant bus for another operand.
+      // We can however allow an inline immediate as src0.
+      if (!Src0->isImm() &&
+          (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
+        return false;
+
+      if (!Src1->isReg() ||
+          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
+        return false;
+
+      const int64_t Imm = DefMI->getOperand(1).getImm();
+
+      // FIXME: This would be a lot easier if we could return a new instruction
+      // instead of having to modify in place.
+
+      // Remove these first since they are at the end.
+      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
+                                                      AMDGPU::OpName::omod));
+      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(AMDGPU::V_MAD_F32,
+                                                      AMDGPU::OpName::clamp));
+
+      Src2->ChangeToImmediate(Imm);
+
+      // These come before src2.
+      removeModOperands(*UseMI);
+      UseMI->setDesc(get(AMDGPU::V_MADAK_F32));
+
+      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
+      if (DeleteDef)
+        DefMI->eraseFromParent();
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
 bool SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
                                               AliasAnalysis *AA) const {
@@ -1001,15 +1161,25 @@ bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
          (FloatToBits(-4.0f) == Val);
 }
 
-bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
-  if (MO.isImm())
-    return isInlineConstant(APInt(32, MO.getImm(), true));
+bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
+                                   unsigned OpSize) const {
+  if (MO.isImm()) {
+    // MachineOperand provides no way to tell the true operand size, since it
+    // only records a 64-bit value. We need to know the size to determine if a
+    // 32-bit floating point immediate bit pattern is legal for an integer
+    // immediate. It would be for any 32-bit integer operand, but would not be
+    // for a 64-bit one.
+
+    unsigned BitSize = 8 * OpSize;
+    return isInlineConstant(APInt(BitSize, MO.getImm(), true));
+  }
 
   return false;
 }
 
-bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
-  return MO.isImm() && !isInlineConstant(MO);
+bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO,
+                                    unsigned OpSize) const {
+  return MO.isImm() && !isInlineConstant(MO, OpSize);
 }
 
 static bool compareMachineOp(const MachineOperand &Op0,
@@ -1039,38 +1209,13 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
   if (OpInfo.RegClass < 0)
     return false;
 
-  if (isLiteralConstant(MO))
+  unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize();
+  if (isLiteralConstant(MO, OpSize))
     return RI.opCanUseLiteralConstant(OpInfo.OperandType);
 
   return RI.opCanUseInlineConstant(OpInfo.OperandType);
 }
 
-bool SIInstrInfo::canFoldOffset(unsigned OffsetSize, unsigned AS) const {
-  switch (AS) {
-  case AMDGPUAS::GLOBAL_ADDRESS: {
-    // MUBUF instructions a 12-bit offset in bytes.
-    return isUInt<12>(OffsetSize);
-  }
-  case AMDGPUAS::CONSTANT_ADDRESS: {
-    // SMRD instructions have an 8-bit offset in dwords on SI and
-    // a 20-bit offset in bytes on VI.
-    if (RI.ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
-      return isUInt<20>(OffsetSize);
-    else
-      return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
-  }
-  case AMDGPUAS::LOCAL_ADDRESS:
-  case AMDGPUAS::REGION_ADDRESS: {
-    // The single offset versions have a 16-bit offset in bytes.
-    return isUInt<16>(OffsetSize);
-  }
-  case AMDGPUAS::PRIVATE_ADDRESS:
-    // Indirect register addressing does not use any offsets.
-  default:
-    return 0;
-  }
-}
-
 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
   int Op32 = AMDGPU::getVOPe32(Opcode);
   if (Op32 == -1)
@@ -1094,9 +1239,10 @@ bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
 }
 
 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
-                                  const MachineOperand &MO) const {
+                                  const MachineOperand &MO,
+                                  unsigned OpSize) const {
   // Literal constants use the constant bus.
-  if (isLiteralConstant(MO))
+  if (isLiteralConstant(MO, OpSize))
     return true;
 
   if (!MO.isReg() || !MO.isUse())
@@ -1152,7 +1298,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
 
     switch (Desc.OpInfo[i].OperandType) {
     case MCOI::OPERAND_REGISTER:
-      if (MI->getOperand(i).isImm() || MI->getOperand(i).isFPImm()) {
+      if (MI->getOperand(i).isImm()) {
         ErrInfo = "Illegal immediate value for operand.";
         return false;
       }
@@ -1160,7 +1306,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
     case AMDGPU::OPERAND_REG_IMM32:
       break;
     case AMDGPU::OPERAND_REG_INLINE_C:
-      if (isLiteralConstant(MI->getOperand(i))) {
+      if (isLiteralConstant(MI->getOperand(i),
+                            RI.getRegClass(RegClass)->getSize())) {
         ErrInfo = "Illegal immediate value for operand.";
         return false;
       }
@@ -1207,9 +1354,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
     for (int OpIdx : OpIndices) {
       if (OpIdx == -1)
         break;
-
       const MachineOperand &MO = MI->getOperand(OpIdx);
-      if (usesConstantBus(MRI, MO)) {
+      if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
         if (MO.isReg()) {
           if (MO.getReg() != SGPRUsed)
             ++ConstantBusCount;
@@ -1277,6 +1423,7 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
   case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
   case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
   case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
+  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
   case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
   case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
   case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
@@ -1313,7 +1460,7 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
 
     if (TargetRegisterInfo::isVirtualRegister(Reg))
       return MRI.getRegClass(Reg);
-    return RI.getRegClass(Reg);
+    return RI.getPhysRegClass(Reg);
   }
 
   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
@@ -1457,14 +1604,16 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
   if (!MO)
     MO = &MI->getOperand(OpIdx);
 
-  if (isVALU(InstDesc.Opcode) && usesConstantBus(MRI, *MO)) {
+  if (isVALU(InstDesc.Opcode) &&
+      usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
     unsigned SGPRUsed =
       MO->isReg() ? MO->getReg() : (unsigned)AMDGPU::NoRegister;
     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
       if (i == OpIdx)
        continue;
-      if (usesConstantBus(MRI, MI->getOperand(i)) &&
-          MI->getOperand(i).isReg() && MI->getOperand(i).getReg() != SGPRUsed) {
+      const MachineOperand &Op = MI->getOperand(i);
+      if (Op.isReg() && Op.getReg() != SGPRUsed &&
+          usesConstantBus(MRI, Op, getOpSize(*MI, i))) {
         return false;
       }
     }
@@ -1557,7 +1706,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
         // We can use one SGPR in each VOP3 instruction.
         continue;
       }
-    } else if (!isLiteralConstant(MO)) {
+    } else if (!isLiteralConstant(MO, getOpSize(MI->getOpcode(), Idx))) {
       // If it is not a register and not a literal constant, then it must be
      // an inline constant which is always legal.
continue; @@ -1730,20 +1879,21 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const { MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata); MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset); MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset); - assert(SOffset->isImm() && SOffset->getImm() == 0 && "Legalizing MUBUF " - "with non-zero soffset is not implemented"); - (void)SOffset; // Create the new instruction. unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode()); MachineInstr *Addr64 = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode)) .addOperand(*VData) - .addOperand(*SRsrc) .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. // This will be replaced later // with the new value of vaddr. - .addOperand(*Offset); + .addOperand(*SRsrc) + .addOperand(*SOffset) + .addOperand(*Offset) + .addImm(0) // glc + .addImm(0) // slc + .addImm(0); // tfe MI->removeFromParent(); MI = Addr64; @@ -1787,14 +1937,20 @@ void SIInstrInfo::splitSMRD(MachineInstr *MI, // The SMRD has an 8-bit offset in dwords on SI and a 20-bit offset in bytes // on VI. + + bool IsKill = SBase->isKill(); if (OffOp) { - bool isVI = RI.ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS; + bool isVI = + MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >= + AMDGPUSubtarget::VOLCANIC_ISLANDS; unsigned OffScale = isVI ? 1 : 4; // Handle the _IMM variant unsigned LoOffset = OffOp->getImm() * OffScale; unsigned HiOffset = LoOffset + HalfSize; Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo) - .addOperand(*SBase) + // Use addReg instead of addOperand + // to make sure kill flag is cleared. + .addReg(SBase->getReg(), 0, SBase->getSubReg()) .addImm(LoOffset / OffScale); if (!isUInt<20>(HiOffset) || (!isVI && !isUInt<8>(HiOffset / OffScale))) { @@ -1803,25 +1959,28 @@ void SIInstrInfo::splitSMRD(MachineInstr *MI, BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR) .addImm(HiOffset); // The offset in register is in bytes. Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi) - .addOperand(*SBase) + .addReg(SBase->getReg(), getKillRegState(IsKill), + SBase->getSubReg()) .addReg(OffsetSGPR); } else { Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi) - .addOperand(*SBase) + .addReg(SBase->getReg(), getKillRegState(IsKill), + SBase->getSubReg()) .addImm(HiOffset / OffScale); } } else { // Handle the _SGPR variant MachineOperand *SOff = getNamedOperand(*MI, AMDGPU::OpName::soff); Lo = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegLo) - .addOperand(*SBase) + .addReg(SBase->getReg(), 0, SBase->getSubReg()) .addOperand(*SOff); unsigned OffsetSGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); BuildMI(*MBB, MI, DL, get(AMDGPU::S_ADD_I32), OffsetSGPR) .addOperand(*SOff) .addImm(HalfSize); Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp)) - .addOperand(*SBase) + .addReg(SBase->getReg(), getKillRegState(IsKill), + SBase->getSubReg()) .addReg(OffsetSGPR); } @@ -1876,7 +2035,8 @@ void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) con // SMRD instructions take a dword offsets on SI and byte offset on VI // and MUBUF instructions always take a byte offset. 
     ImmOffset = MI->getOperand(2).getImm();
-    if (RI.ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
+    if (MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <=
+        AMDGPUSubtarget::SEA_ISLANDS)
       ImmOffset <<= 2;
     RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
@@ -1916,12 +2076,15 @@ void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) con
           .addImm(AMDGPU::sub3);
   MI->setDesc(get(NewOpcode));
   if (MI->getOperand(2).isReg()) {
-    MI->getOperand(2).setReg(MI->getOperand(1).getReg());
+    MI->getOperand(2).setReg(SRsrc);
   } else {
-    MI->getOperand(2).ChangeToRegister(MI->getOperand(1).getReg(), false);
+    MI->getOperand(2).ChangeToRegister(SRsrc, false);
   }
-  MI->getOperand(1).setReg(SRsrc);
+  MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0));
   MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(ImmOffset));
+  MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0)); // glc
+  MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0)); // slc
+  MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(0)); // tfe
 
   const TargetRegisterClass *NewDstRC =
       RI.getRegClass(get(NewOpcode).OpInfo[0].RegClass);
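
Note on the FoldImmediate hunk above: v_madmk_f32 carries its literal multiplier as the last operand, so the fold has to rotate the surviving register operands exactly as the in-diff comments describe (old src1 into src0, old src2 into src1, the literal into src2). Below is a self-contained toy model of just that shuffle; the struct layout and names are invented for illustration and are not LLVM's MachineInstr API.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Toy operand layouts; field names are illustrative only.
struct Mad {           // v_mad_f32 dst, src0, src1, src2   (dst = src0 * src1 + src2)
  unsigned src0, src1, src2;  // virtual register numbers
};
struct Madmk {         // v_madmk_f32 dst, src0, imm, src1  (dst = src0 * K + src1)
  unsigned src0, src1; // surviving register operands
  int64_t K;           // folded literal, stored as the last operand
};

// Fold the literal K (the value that was moved into FoldedReg) into the
// multiply: old src1 -> new src0, old src2 -> new src1, K replaces the reg.
static Madmk foldMultiplier(const Mad &M, unsigned FoldedReg, int64_t K) {
  assert(M.src0 == FoldedReg && "constant expected on src0 after canonicalization");
  return Madmk{M.src1, M.src2, K};
}

int main() {
  Mad M{100, 101, 102};                         // %100 holds the literal
  Madmk R = foldMultiplier(M, 100, 0x40400000); // K = bit pattern of 3.0f
  std::printf("src0=%%%u src1=%%%u K=0x%llx\n",
              R.src0, R.src1, (unsigned long long)R.K);
  return 0;
}

The same bookkeeping explains why the v_madak_f32 path in the diff is simpler: there the literal already sits in the src2 slot, so only the modifier, clamp, and omod operands need to be dropped.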
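
The isInlineConstant/isLiteralConstant hunks thread an operand size through every call because a MachineOperand stores any immediate as one 64-bit value: the same stored bits can be an inline constant for a 32-bit operand but a full literal for a 64-bit one. Here is a minimal standalone sketch of that width dependence, using SI's inline-constant ranges (integers -16..64 plus a fixed set of f32 values, as in the FloatToBits checks visible above); the helper names are invented for illustration.

#include <cstdint>
#include <cstring>
#include <cstdio>

// True if Bits is one of the f32 values SI encodes inline
// (0.5, -0.5, 1.0, -1.0, 2.0, -2.0, 4.0, -4.0).
static bool isInlineFP32(uint32_t Bits) {
  const float Vals[] = {0.5f, -0.5f, 1.0f, -1.0f, 2.0f, -2.0f, 4.0f, -4.0f};
  for (float F : Vals) {
    uint32_t B;
    std::memcpy(&B, &F, sizeof(B));
    if (Bits == B)
      return true;
  }
  return false;
}

// Width-aware check, simplified: small integers are inline at any width, but
// an f32 bit pattern only qualifies when the operand is 4 bytes wide.
static bool isInlineConstant(int64_t Imm, unsigned OpSizeBytes) {
  if (Imm >= -16 && Imm <= 64)
    return true;
  if (OpSizeBytes == 4)
    return isInlineFP32(static_cast<uint32_t>(Imm));
  return false; // a 64-bit operand would need an f64 pattern instead
}

int main() {
  const int64_t OneF32 = 0x3f800000; // bit pattern of 1.0f
  std::printf("32-bit operand inline: %d\n", isInlineConstant(OneF32, 4)); // 1
  std::printf("64-bit operand inline: %d\n", isInlineConstant(OneF32, 8)); // 0
  return 0;
}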
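
Finally, the splitSMRD hunks rest on offset arithmetic the diff only implies: the _IMM form takes an 8-bit offset in dwords on SI but a 20-bit offset in bytes on VI, so the high half's offset (LoOffset + HalfSize) can overflow the field and must be materialized in an SGPR instead. A quick standalone check of that overflow condition, with invented names and hypothetical example values:

#include <cstdint>
#include <cstdio>

// Mirrors the diff's test: on VI offsets are bytes in a 20-bit field; on SI
// they are dwords in an 8-bit field, so byte offsets are first scaled by 4.
static bool fitsImmediate(uint64_t ByteOffset, bool IsVI) {
  const unsigned OffScale = IsVI ? 1 : 4;
  if (ByteOffset % OffScale != 0)
    return false;
  const uint64_t Enc = ByteOffset / OffScale;
  return IsVI ? Enc < (1u << 20) : Enc < (1u << 8);
}

int main() {
  const uint64_t LoOffset = 1008, HalfSize = 32; // hypothetical split
  const uint64_t HiOffset = LoOffset + HalfSize; // 1040 bytes
  // SI: 1008/4 = 252 dwords fits in 8 bits, but 1040/4 = 260 does not.
  std::printf("SI: lo fits %d, hi fits %d\n",
              fitsImmediate(LoOffset, false), fitsImmediate(HiOffset, false));
  std::printf("VI: hi fits %d\n", fitsImmediate(HiOffset, true));
  return 0;
}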