diff options
Diffstat (limited to 'lib/Target/Hexagon')
41 files changed, 12273 insertions, 2528 deletions
diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt index af9e813..1f2d8ac 100644 --- a/lib/Target/Hexagon/CMakeLists.txt +++ b/lib/Target/Hexagon/CMakeLists.txt @@ -28,8 +28,12 @@ add_llvm_target(HexagonCodeGen HexagonSubtarget.cpp HexagonTargetMachine.cpp HexagonTargetObjectFile.cpp + HexagonVLIWPacketizer.cpp + HexagonNewValueJump.cpp ) +add_dependencies(LLVMHexagonCodeGen intrinsics_gen) + add_subdirectory(TargetInfo) add_subdirectory(InstPrinter) add_subdirectory(MCTargetDesc) diff --git a/lib/Target/Hexagon/Hexagon.h b/lib/Target/Hexagon/Hexagon.h index 0808323..45f857b 100644 --- a/lib/Target/Hexagon/Hexagon.h +++ b/lib/Target/Hexagon/Hexagon.h @@ -40,6 +40,9 @@ namespace llvm { FunctionPass *createHexagonHardwareLoops(); FunctionPass *createHexagonPeephole(); FunctionPass *createHexagonFixupHwLoops(); + FunctionPass *createHexagonPacketizer(); + FunctionPass *createHexagonNewValueJump(); + /* TODO: object output. MCCodeEmitter *createHexagonMCCodeEmitter(const Target &, @@ -47,7 +50,8 @@ namespace llvm { MCContext &Ctx); */ /* TODO: assembler input. - TargetAsmBackend *createHexagonAsmBackend(const Target &, const std::string &); + TargetAsmBackend *createHexagonAsmBackend(const Target &, + const std::string &); */ void HexagonLowerToMC(const MachineInstr *MI, MCInst &MCI, HexagonAsmPrinter &AP); @@ -67,7 +71,7 @@ namespace llvm { // Normal instruction size (in bytes). #define HEXAGON_INSTR_SIZE 4 -// Maximum number of words in a packet (in instructions). +// Maximum number of words and instructions in a packet. 
#define HEXAGON_PACKET_SIZE 4 #endif diff --git a/lib/Target/Hexagon/Hexagon.td b/lib/Target/Hexagon/Hexagon.td index 4a50d16..451e562 100644 --- a/lib/Target/Hexagon/Hexagon.td +++ b/lib/Target/Hexagon/Hexagon.td @@ -28,6 +28,8 @@ def ArchV3 : SubtargetFeature<"v3", "HexagonArchVersion", "V3", "Hexagon v3">; def ArchV4 : SubtargetFeature<"v4", "HexagonArchVersion", "V4", "Hexagon v4">; +def ArchV5 : SubtargetFeature<"v5", "HexagonArchVersion", "V5", + "Hexagon v5">; //===----------------------------------------------------------------------===// // Register File, Calling Conv, Instruction Descriptions @@ -45,13 +47,15 @@ def HexagonInstrInfo : InstrInfo; // Hexagon processors supported. //===----------------------------------------------------------------------===// -class Proc<string Name, ProcessorItineraries Itin, +class Proc<string Name, SchedMachineModel Model, list<SubtargetFeature> Features> - : Processor<Name, Itin, Features>; + : ProcessorModel<Name, Model, Features>; + +def : Proc<"hexagonv2", HexagonModel, [ArchV2]>; +def : Proc<"hexagonv3", HexagonModel, [ArchV2, ArchV3]>; +def : Proc<"hexagonv4", HexagonModelV4, [ArchV2, ArchV3, ArchV4]>; +def : Proc<"hexagonv5", HexagonModelV4, [ArchV2, ArchV3, ArchV4, ArchV5]>; -def : Proc<"hexagonv2", HexagonItineraries, [ArchV2]>; -def : Proc<"hexagonv3", HexagonItineraries, [ArchV2, ArchV3]>; -def : Proc<"hexagonv4", HexagonItinerariesV4, [ArchV2, ArchV3, ArchV4]>; // Hexagon Uses the MC printer for assembler output, so make sure the TableGen // AsmWriter bits get associated with the correct class. 
diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp index 39bf45d..5fa4740 100644 --- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp +++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp @@ -13,11 +13,11 @@ // //===----------------------------------------------------------------------===// - #define DEBUG_TYPE "asm-printer" #include "Hexagon.h" #include "HexagonAsmPrinter.h" #include "HexagonMachineFunctionInfo.h" +#include "HexagonMCInst.h" #include "HexagonTargetMachine.h" #include "HexagonSubtarget.h" #include "InstPrinter/HexagonInstPrinter.h" @@ -77,8 +77,7 @@ void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, const MachineOperand &MO = MI->getOperand(OpNo); switch (MO.getType()) { - default: - assert(0 && "<unknown operand type>"); + default: llvm_unreachable ("<unknown operand type>"); case MachineOperand::MO_Register: O << HexagonInstPrinter::getRegisterName(MO.getReg()); return; @@ -134,7 +133,9 @@ bool HexagonAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, if (ExtraCode[1] != 0) return true; // Unknown modifier. switch (ExtraCode[0]) { - default: return true; // Unknown modifier. + default: + // See if this is a generic print operand + return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, OS); case 'c': // Don't print "$" before a global var name or constant. // Hexagon never has a prefix. printOperand(MI, OpNo, OS); @@ -196,10 +197,45 @@ void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI, /// the current output stream. 
/// void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) { - MCInst MCI; - - HexagonLowerToMC(MI, MCI, *this); - OutStreamer.EmitInstruction(MCI); + if (MI->isBundle()) { + std::vector<const MachineInstr*> BundleMIs; + + const MachineBasicBlock *MBB = MI->getParent(); + MachineBasicBlock::const_instr_iterator MII = MI; + ++MII; + unsigned int IgnoreCount = 0; + while (MII != MBB->end() && MII->isInsideBundle()) { + const MachineInstr *MInst = MII; + if (MInst->getOpcode() == TargetOpcode::DBG_VALUE || + MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) { + IgnoreCount++; + ++MII; + continue; + } + //BundleMIs.push_back(&*MII); + BundleMIs.push_back(MInst); + ++MII; + } + unsigned Size = BundleMIs.size(); + assert((Size+IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!"); + for (unsigned Index = 0; Index < Size; Index++) { + HexagonMCInst MCI; + MCI.setStartPacket(Index == 0); + MCI.setEndPacket(Index == (Size-1)); + + HexagonLowerToMC(BundleMIs[Index], MCI, *this); + OutStreamer.EmitInstruction(MCI); + } + } + else { + HexagonMCInst MCI; + if (MI->getOpcode() == Hexagon::ENDLOOP0) { + MCI.setStartPacket(true); + MCI.setEndPacket(true); + } + HexagonLowerToMC(MI, MCI, *this); + OutStreamer.EmitInstruction(MCI); + } return; } @@ -241,15 +277,15 @@ void HexagonAsmPrinter::printGlobalOperand(const MachineInstr *MI, int OpNo, void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo, raw_ostream &O) { const MachineOperand &MO = MI->getOperand(OpNo); - assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) && - "Expecting jump table index"); + assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) && + "Expecting jump table index"); // Hexagon_TODO: Do we need name mangling? 
O << *GetJTISymbol(MO.getIndex()); } void HexagonAsmPrinter::printConstantPool(const MachineInstr *MI, int OpNo, - raw_ostream &O) { + raw_ostream &O) { const MachineOperand &MO = MI->getOperand(OpNo); assert( (MO.getType() == MachineOperand::MO_ConstantPoolIndex) && "Expecting constant pool index"); diff --git a/lib/Target/Hexagon/HexagonCallingConv.td b/lib/Target/Hexagon/HexagonCallingConv.td index bd9608b..e61b2a7 100644 --- a/lib/Target/Hexagon/HexagonCallingConv.td +++ b/lib/Target/Hexagon/HexagonCallingConv.td @@ -17,8 +17,8 @@ // Hexagon 32-bit C return-value convention. def RetCC_Hexagon32 : CallingConv<[ - CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>, - CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>, + CCIfType<[i32, f32], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>, + CCIfType<[i64, f64], CCAssignToReg<[D0, D1, D2]>>, // Alternatively, they are assigned to the stack in 4-byte aligned units. CCAssignToStack<4, 4> @@ -27,8 +27,8 @@ def RetCC_Hexagon32 : CallingConv<[ // Hexagon 32-bit C Calling convention. def CC_Hexagon32 : CallingConv<[ // All arguments get passed in integer registers if there is space. - CCIfType<[i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>, - CCIfType<[i64], CCAssignToReg<[D0, D1, D2]>>, + CCIfType<[f32, i32, i16, i8], CCAssignToReg<[R0, R1, R2, R3, R4, R5]>>, + CCIfType<[f64, i64], CCAssignToReg<[D0, D1, D2]>>, // Alternatively, they are assigned to the stack in 4-byte aligned units. CCAssignToStack<4, 4> diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/lib/Target/Hexagon/HexagonCallingConvLower.cpp index 46c20e9..ba8e679 100644 --- a/lib/Target/Hexagon/HexagonCallingConvLower.cpp +++ b/lib/Target/Hexagon/HexagonCallingConvLower.cpp @@ -56,11 +56,8 @@ void Hexagon_CCState::HandleByVal(unsigned ValNo, EVT ValVT, /// MarkAllocated - Mark a register and all of its aliases as allocated. 
void Hexagon_CCState::MarkAllocated(unsigned Reg) { - UsedRegs[Reg/32] |= 1 << (Reg&31); - - if (const uint16_t *RegAliases = TRI.getAliasSet(Reg)) - for (; (Reg = *RegAliases); ++RegAliases) - UsedRegs[Reg/32] |= 1 << (Reg&31); + for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI) + UsedRegs[*AI/32] |= 1 << (*AI&31); } /// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node, diff --git a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp index 2100474..ae2ca37 100644 --- a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp +++ b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp @@ -7,9 +7,9 @@ // //===----------------------------------------------------------------------===// // The Hexagon processor has no instructions that load or store predicate -// registers directly. So, when these registers must be spilled a general -// purpose register must be found and the value copied to/from it from/to -// the predicate register. This code currently does not use the register +// registers directly. So, when these registers must be spilled a general +// purpose register must be found and the value copied to/from it from/to +// the predicate register. This code currently does not use the register // scavenger mechanism available in the allocator. There are two registers // reserved to allow spilling/restoring predicate registers. One is used to // hold the predicate value. 
The other is used when stack frame offsets are @@ -84,7 +84,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) { int SrcReg = MI->getOperand(2).getReg(); assert(Hexagon::PredRegsRegClass.contains(SrcReg) && "Not a predicate register"); - if (!TII->isValidOffset(Hexagon::STriw, Offset)) { + if (!TII->isValidOffset(Hexagon::STriw_indexed, Offset)) { if (!TII->isValidOffset(Hexagon::ADD_ri, Offset)) { BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::CONST32_Int_Real), @@ -95,7 +95,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) { BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd), HEXAGON_RESERVED_REG_2).addReg(SrcReg); BuildMI(*MBB, MII, MI->getDebugLoc(), - TII->get(Hexagon::STriw)) + TII->get(Hexagon::STriw_indexed)) .addReg(HEXAGON_RESERVED_REG_1) .addImm(0).addReg(HEXAGON_RESERVED_REG_2); } else { @@ -103,7 +103,8 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) { HEXAGON_RESERVED_REG_1).addReg(FP).addImm(Offset); BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd), HEXAGON_RESERVED_REG_2).addReg(SrcReg); - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw)) + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::STriw_indexed)) .addReg(HEXAGON_RESERVED_REG_1) .addImm(0) .addReg(HEXAGON_RESERVED_REG_2); @@ -111,7 +112,8 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) { } else { BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_RsPd), HEXAGON_RESERVED_REG_2).addReg(SrcReg); - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::STriw)). + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::STriw_indexed)). 
addReg(FP).addImm(Offset).addReg(HEXAGON_RESERVED_REG_2); } MII = MBB->erase(MI); diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index e8a6924..cd682df 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -209,6 +209,16 @@ bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const { FuncInfo->hasClobberLR() ); } +static inline +unsigned uniqueSuperReg(unsigned Reg, const TargetRegisterInfo *TRI) { + MCSuperRegIterator SRI(Reg, TRI); + assert(SRI.isValid() && "Expected a superreg"); + unsigned SuperReg = *SRI; + ++SRI; + assert(!SRI.isValid() && "Expected exactly one superreg"); + return SuperReg; +} + bool HexagonFrameLowering::spillCalleeSavedRegisters( MachineBasicBlock &MBB, @@ -235,26 +245,21 @@ HexagonFrameLowering::spillCalleeSavedRegisters( // // Check if we can use a double-word store. // - const uint16_t* SuperReg = TRI->getSuperRegisters(Reg); - - // Assume that there is exactly one superreg. - assert(SuperReg[0] && !SuperReg[1] && "Expected exactly one superreg"); + unsigned SuperReg = uniqueSuperReg(Reg, TRI); bool CanUseDblStore = false; const TargetRegisterClass* SuperRegClass = 0; if (ContiguousRegs && (i < CSI.size()-1)) { - const uint16_t* SuperRegNext = TRI->getSuperRegisters(CSI[i+1].getReg()); - assert(SuperRegNext[0] && !SuperRegNext[1] && - "Expected exactly one superreg"); - SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg[0]); - CanUseDblStore = (SuperRegNext[0] == SuperReg[0]); + unsigned SuperRegNext = uniqueSuperReg(CSI[i+1].getReg(), TRI); + SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg); + CanUseDblStore = (SuperRegNext == SuperReg); } if (CanUseDblStore) { - TII.storeRegToStackSlot(MBB, MI, SuperReg[0], true, + TII.storeRegToStackSlot(MBB, MI, SuperReg, true, CSI[i+1].getFrameIdx(), SuperRegClass, TRI); - MBB.addLiveIn(SuperReg[0]); + MBB.addLiveIn(SuperReg); ++i; } else { // Cannot use a double-word store. 
@@ -295,25 +300,20 @@ bool HexagonFrameLowering::restoreCalleeSavedRegisters( // // Check if we can use a double-word load. // - const uint16_t* SuperReg = TRI->getSuperRegisters(Reg); + unsigned SuperReg = uniqueSuperReg(Reg, TRI); const TargetRegisterClass* SuperRegClass = 0; - - // Assume that there is exactly one superreg. - assert(SuperReg[0] && !SuperReg[1] && "Expected exactly one superreg"); bool CanUseDblLoad = false; if (ContiguousRegs && (i < CSI.size()-1)) { - const uint16_t* SuperRegNext = TRI->getSuperRegisters(CSI[i+1].getReg()); - assert(SuperRegNext[0] && !SuperRegNext[1] && - "Expected exactly one superreg"); - SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg[0]); - CanUseDblLoad = (SuperRegNext[0] == SuperReg[0]); + unsigned SuperRegNext = uniqueSuperReg(CSI[i+1].getReg(), TRI); + SuperRegClass = TRI->getMinimalPhysRegClass(SuperReg); + CanUseDblLoad = (SuperRegNext == SuperReg); } if (CanUseDblLoad) { - TII.loadRegFromStackSlot(MBB, MI, SuperReg[0], CSI[i+1].getFrameIdx(), + TII.loadRegFromStackSlot(MBB, MI, SuperReg, CSI[i+1].getFrameIdx(), SuperRegClass, TRI); - MBB.addLiveIn(SuperReg[0]); + MBB.addLiveIn(SuperReg); ++i; } else { // Cannot use a double-word load. diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 57772a5..d756aec 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -328,7 +328,10 @@ CountValue *HexagonHardwareLoops::getTripCount(MachineLoop *L) const { // can get a useful trip count. The trip count can // be either a register or an immediate. The location // of the value depends upon the type (reg or imm). 
- while ((IV_Opnd = IV_Opnd->getNextOperandForReg())) { + for (MachineRegisterInfo::reg_iterator + RI = MRI->reg_begin(IV_Opnd->getReg()), RE = MRI->reg_end(); + RI != RE; ++RI) { + IV_Opnd = &RI.getOperand(); const MachineInstr *MI = IV_Opnd->getParent(); if (L->contains(MI) && isCompareEqualsImm(MI)) { const MachineOperand &MO = MI->getOperand(2); @@ -491,7 +494,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L) { TII->get(Hexagon::NEG), CountReg).addReg(CountReg1); } - // Add the Loop instruction to the begining of the loop. + // Add the Loop instruction to the beginning of the loop. BuildMI(*Preheader, InsertPos, InsertPos->getDebugLoc(), TII->get(Hexagon::LOOP0_r)).addMBB(LoopStart).addReg(CountReg); } else { @@ -623,7 +626,7 @@ void HexagonFixupHwLoops::convertLoopInstr(MachineFunction &MF, const TargetInstrInfo *TII = MF.getTarget().getInstrInfo(); MachineBasicBlock *MBB = MII->getParent(); DebugLoc DL = MII->getDebugLoc(); - unsigned Scratch = RS.scavengeRegister(Hexagon::IntRegsRegisterClass, MII, 0); + unsigned Scratch = RS.scavengeRegister(&Hexagon::IntRegsRegClass, MII, 0); // First, set the LC0 with the trip count. if (MII->getOperand(1).isReg()) { diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp index 9df965e..5499134 100644 --- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp +++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp @@ -90,7 +90,9 @@ public: SDNode *SelectMul(SDNode *N); SDNode *SelectZeroExtend(SDNode *N); SDNode *SelectIntrinsicWOChain(SDNode *N); + SDNode *SelectIntrinsicWChain(SDNode *N); SDNode *SelectConstant(SDNode *N); + SDNode *SelectConstantFP(SDNode *N); SDNode *SelectAdd(SDNode *N); // Include the pieces autogenerated from the target description. 
@@ -318,7 +320,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetLoad(LoadSDNode *LD, DebugLoc dl) { else if (LoadedVT == MVT::i32) Opcode = Hexagon::LDriw_indexed; else if (LoadedVT == MVT::i16) Opcode = Hexagon::LDrih_indexed; else if (LoadedVT == MVT::i8) Opcode = Hexagon::LDrib_indexed; - else assert (0 && "unknown memory type"); + else llvm_unreachable("unknown memory type"); // Build indexed load. SDValue TargetConstOff = CurDAG->getTargetConstant(Offset, PointerTy); @@ -375,7 +377,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoadSignExtend64(LoadSDNode *LD, }; ReplaceUses(Froms, Tos, 3); return Result_2; - } + } SDValue TargetConst0 = CurDAG->getTargetConstant(0, MVT::i32); SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32); SDNode *Result_1 = CurDAG->getMachineNode(Opcode, dl, MVT::i32, @@ -516,7 +518,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, DebugLoc dl) { else Opcode = zextval ? Hexagon::LDriub : Hexagon::LDrib; } else - assert (0 && "unknown memory type"); + llvm_unreachable("unknown memory type"); // For zero ext i64 loads, we need to add combine instructions. if (LD->getValueType(0) == MVT::i64 && @@ -613,7 +615,7 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) { else if (StoredVT == MVT::i32) Opcode = Hexagon::POST_STwri; else if (StoredVT == MVT::i16) Opcode = Hexagon::POST_SThri; else if (StoredVT == MVT::i8) Opcode = Hexagon::POST_STbri; - else assert (0 && "unknown memory type"); + else llvm_unreachable("unknown memory type"); // Build post increment store. SDNode* Result = CurDAG->getMachineNode(Opcode, dl, MVT::i32, @@ -636,10 +638,10 @@ SDNode *HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, DebugLoc dl) { // Figure out the opcode. 
if (StoredVT == MVT::i64) Opcode = Hexagon::STrid; - else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw; + else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed; else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih; else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib; - else assert (0 && "unknown memory type"); + else llvm_unreachable("unknown memory type"); // Build regular store. SDValue TargetConstVal = CurDAG->getTargetConstant(Val, MVT::i32); @@ -693,7 +695,7 @@ SDNode *HexagonDAGToDAGISel::SelectBaseOffsetStore(StoreSDNode *ST, else if (StoredVT == MVT::i32) Opcode = Hexagon::STriw_indexed; else if (StoredVT == MVT::i16) Opcode = Hexagon::STrih_indexed; else if (StoredVT == MVT::i8) Opcode = Hexagon::STrib_indexed; - else assert (0 && "unknown memory type"); + else llvm_unreachable("unknown memory type"); SDValue Ops[] = {SDValue(NewBase,0), CurDAG->getTargetConstant(Offset,PointerTy), @@ -723,7 +725,7 @@ SDNode *HexagonDAGToDAGISel::SelectStore(SDNode *N) { if (AM != ISD::UNINDEXED) { return SelectIndexedStore(ST, dl); } - + return SelectBaseOffsetStore(ST, dl); } @@ -752,7 +754,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) { if (MulOp0.getOpcode() == ISD::SIGN_EXTEND) { SDValue Sext0 = MulOp0.getOperand(0); if (Sext0.getNode()->getValueType(0) != MVT::i32) { - SelectCode(N); + return SelectCode(N); } OP0 = Sext0; @@ -761,7 +763,7 @@ SDNode *HexagonDAGToDAGISel::SelectMul(SDNode *N) { if (LD->getMemoryVT() != MVT::i32 || LD->getExtensionType() != ISD::SEXTLOAD || LD->getAddressingMode() != ISD::UNINDEXED) { - SelectCode(N); + return SelectCode(N); } SDValue Chain = LD->getChain(); @@ -1128,12 +1130,12 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { // For immediates, lower it. 
for (unsigned i = 1; i < N->getNumOperands(); ++i) { SDNode *Arg = N->getOperand(i).getNode(); - const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI); + const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI, *MF); - if (RC == Hexagon::IntRegsRegisterClass || - RC == Hexagon::DoubleRegsRegisterClass) { + if (RC == &Hexagon::IntRegsRegClass || + RC == &Hexagon::DoubleRegsRegClass) { Ops.push_back(SDValue(Arg, 0)); - } else if (RC == Hexagon::PredRegsRegisterClass) { + } else if (RC == &Hexagon::PredRegsRegClass) { // Do the transfer. SDNode *PdRs = CurDAG->getMachineNode(Hexagon::TFR_PdRs, dl, MVT::i1, SDValue(Arg, 0)); @@ -1158,6 +1160,25 @@ SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) { return SelectCode(N); } +// +// Map floating point constant values. +// +SDNode *HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) { + DebugLoc dl = N->getDebugLoc(); + ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N); + APFloat APF = CN->getValueAPF(); + if (N->getValueType(0) == MVT::f32) { + return CurDAG->getMachineNode(Hexagon::TFRI_f, dl, MVT::f32, + CurDAG->getTargetConstantFP(APF.convertToFloat(), MVT::f32)); + } + else if (N->getValueType(0) == MVT::f64) { + return CurDAG->getMachineNode(Hexagon::CONST64_Float_Real, dl, MVT::f64, + CurDAG->getTargetConstantFP(APF.convertToDouble(), MVT::f64)); + } + + return SelectCode(N); +} + // // Map predicate true (encoded as -1 in LLVM) to a XOR. @@ -1215,7 +1236,7 @@ SDNode *HexagonDAGToDAGISel::SelectAdd(SDNode *N) { // Build Rd = Rd' + asr(Rs, Rt). 
The machine constraints will ensure that // Rd and Rd' are assigned to the same register - SDNode* Result = CurDAG->getMachineNode(Hexagon::ASR_rr_acc, dl, MVT::i32, + SDNode* Result = CurDAG->getMachineNode(Hexagon::ASR_ADD_rr, dl, MVT::i32, N->getOperand(1), Src1->getOperand(0), Src1->getOperand(1)); @@ -1234,6 +1255,9 @@ SDNode *HexagonDAGToDAGISel::Select(SDNode *N) { case ISD::Constant: return SelectConstant(N); + case ISD::ConstantFP: + return SelectConstantFP(N); + case ISD::ADD: return SelectAdd(N); diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 8c4350d..703a128 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -103,12 +103,12 @@ CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT, State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo)); return false; } - if (LocVT == MVT::i32) { + if (LocVT == MVT::i32 || LocVT == MVT::f32) { ofst = State.AllocateStack(4, 4); State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo)); return false; } - if (LocVT == MVT::i64) { + if (LocVT == MVT::i64 || LocVT == MVT::f64) { ofst = State.AllocateStack(8, 8); State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo)); return false; @@ -142,12 +142,12 @@ CC_Hexagon (unsigned ValNo, MVT ValVT, LocInfo = CCValAssign::AExt; } - if (LocVT == MVT::i32) { + if (LocVT == MVT::i32 || LocVT == MVT::f32) { if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State)) return false; } - if (LocVT == MVT::i64) { + if (LocVT == MVT::i64 || LocVT == MVT::f64) { if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State)) return false; } @@ -217,12 +217,12 @@ static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT, LocInfo = CCValAssign::AExt; } - if (LocVT == MVT::i32) { + if (LocVT == MVT::i32 || LocVT == MVT::f32) { if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State)) return false; } - if (LocVT == MVT::i64) { + if (LocVT 
== MVT::i64 || LocVT == MVT::f64) { if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State)) return false; } @@ -234,7 +234,7 @@ static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State) { - if (LocVT == MVT::i32) { + if (LocVT == MVT::i32 || LocVT == MVT::f32) { if (unsigned Reg = State.AllocateReg(Hexagon::R0)) { State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); return false; @@ -249,7 +249,7 @@ static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT, static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State) { - if (LocVT == MVT::i64) { + if (LocVT == MVT::i64 || LocVT == MVT::f64) { if (unsigned Reg = State.AllocateReg(Hexagon::D0)) { State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); return false; @@ -299,7 +299,7 @@ HexagonTargetLowering::LowerReturn(SDValue Chain, // CCState - Info about the registers and stack slot. CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); + getTargetMachine(), RVLocs, *DAG.getContext()); // Analyze return values of ISD::RET CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon); @@ -351,7 +351,7 @@ HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, SmallVector<CCValAssign, 16> RVLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); + getTargetMachine(), RVLocs, *DAG.getContext()); CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon); @@ -370,21 +370,25 @@ HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, /// LowerCall - Functions arguments are copied from virtual regs to /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. 
SDValue -HexagonTargetLowering::LowerCall(SDValue Chain, SDValue Callee, - CallingConv::ID CallConv, bool isVarArg, - bool doesNotRet, bool &isTailCall, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, +HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const { + SelectionDAG &DAG = CLI.DAG; + DebugLoc &dl = CLI.DL; + SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; + SmallVector<SDValue, 32> &OutVals = CLI.OutVals; + SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; + SDValue Chain = CLI.Chain; + SDValue Callee = CLI.Callee; + bool &isTailCall = CLI.IsTailCall; + CallingConv::ID CallConv = CLI.CallConv; + bool isVarArg = CLI.IsVarArg; bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); // Analyze operands of the call, assigning locations to each operand. SmallVector<CCValAssign, 16> ArgLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); + getTargetMachine(), ArgLocs, *DAG.getContext()); // Check for varargs. NumNamedVarArgParams = -1; @@ -504,7 +508,7 @@ HexagonTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. - // The InFlag in necessary since all emited instructions must be + // The InFlag in necessary since all emitted instructions must be // stuck together. SDValue InFlag; if (!isTailCall) { @@ -524,7 +528,7 @@ HexagonTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // than necessary, because it means that each store effectively depends // on every argument instead of just those arguments it would clobber. // - // Do not flag preceeding copytoreg stuff together with the following stuff. 
+ // Do not flag preceding copytoreg stuff together with the following stuff. InFlag = SDValue(); for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, @@ -813,7 +817,7 @@ const { // Assign locations to all of the incoming arguments. SmallVector<CCValAssign, 16> ArgLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); + getTargetMachine(), ArgLocs, *DAG.getContext()); CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon); @@ -839,14 +843,15 @@ const { // 1. int, long long, ptr args that get allocated in register. // 2. Large struct that gets an register to put its address in. EVT RegVT = VA.getLocVT(); - if (RegVT == MVT::i8 || RegVT == MVT::i16 || RegVT == MVT::i32) { + if (RegVT == MVT::i8 || RegVT == MVT::i16 || + RegVT == MVT::i32 || RegVT == MVT::f32) { unsigned VReg = - RegInfo.createVirtualRegister(Hexagon::IntRegsRegisterClass); + RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); } else if (RegVT == MVT::i64) { unsigned VReg = - RegInfo.createVirtualRegister(Hexagon::DoubleRegsRegisterClass); + RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass); RegInfo.addLiveIn(VA.getLocReg(), VReg); InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT)); } else { @@ -918,14 +923,33 @@ HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { SDValue HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + SDValue CC = Op.getOperand(4); + SDValue TrueVal = Op.getOperand(2); + SDValue FalseVal = Op.getOperand(3); + DebugLoc dl = Op.getDebugLoc(); SDNode* OpNode = Op.getNode(); + EVT SVT = OpNode->getValueType(0); - SDValue Cond = DAG.getNode(ISD::SETCC, Op.getDebugLoc(), MVT::i1, - Op.getOperand(2), Op.getOperand(3), - 
Op.getOperand(4)); - return DAG.getNode(ISD::SELECT, Op.getDebugLoc(), OpNode->getValueType(0), - Cond, Op.getOperand(0), - Op.getOperand(1)); + SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC); + return DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal); +} + +SDValue +HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { + EVT ValTy = Op.getValueType(); + + DebugLoc dl = Op.getDebugLoc(); + ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); + SDValue Res; + if (CP->isMachineConstantPoolEntry()) + Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy, + CP->getAlignment()); + else + Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy, + CP->getAlignment()); + return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res); } SDValue @@ -1010,11 +1034,18 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine : TargetLowering(targetmachine, new HexagonTargetObjectFile()), TM(targetmachine) { + const HexagonRegisterInfo* QRI = TM.getRegisterInfo(); + // Set up the register classes. 
- addRegisterClass(MVT::i32, Hexagon::IntRegsRegisterClass); - addRegisterClass(MVT::i64, Hexagon::DoubleRegsRegisterClass); + addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass); + addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass); - addRegisterClass(MVT::i1, Hexagon::PredRegsRegisterClass); + if (QRI->Subtarget.hasV5TOps()) { + addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass); + addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass); + } + + addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass); computeRegisterProperties(); @@ -1028,32 +1059,16 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine // // Library calls for unsupported operations // - setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2"); - setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf"); setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf"); setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf"); - setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf"); - setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf"); - setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf"); - setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf"); - setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi"); - setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi"); setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti"); - - setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi"); - setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi"); setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti"); - setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf"); - setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi"); setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti"); - setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi"); setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti"); - setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2"); - 
setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3"); setOperationAction(ISD::SDIV, MVT::i32, Expand); setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3"); @@ -1082,92 +1097,184 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3"); setOperationAction(ISD::FDIV, MVT::f64, Expand); - setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2"); - setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand); + setOperationAction(ISD::FSQRT, MVT::f32, Expand); + setOperationAction(ISD::FSQRT, MVT::f64, Expand); + setOperationAction(ISD::FSIN, MVT::f32, Expand); + setOperationAction(ISD::FSIN, MVT::f64, Expand); + + if (QRI->Subtarget.hasV5TOps()) { + // Hexagon V5 Support. + setOperationAction(ISD::FADD, MVT::f32, Legal); + setOperationAction(ISD::FADD, MVT::f64, Legal); + setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal); + setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal); + setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal); + setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal); + setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal); + + setCondCodeAction(ISD::SETOGE, MVT::f32, Legal); + setCondCodeAction(ISD::SETOGE, MVT::f64, Legal); + setCondCodeAction(ISD::SETUGE, MVT::f32, Legal); + setCondCodeAction(ISD::SETUGE, MVT::f64, Legal); + + setCondCodeAction(ISD::SETOGT, MVT::f32, Legal); + setCondCodeAction(ISD::SETOGT, MVT::f64, Legal); + setCondCodeAction(ISD::SETUGT, MVT::f32, Legal); + setCondCodeAction(ISD::SETUGT, MVT::f64, Legal); + + setCondCodeAction(ISD::SETOLE, MVT::f32, Legal); + setCondCodeAction(ISD::SETOLE, MVT::f64, Legal); + setCondCodeAction(ISD::SETOLT, MVT::f32, Legal); + setCondCodeAction(ISD::SETOLT, MVT::f64, Legal); + + setOperationAction(ISD::ConstantFP, MVT::f32, Legal); + setOperationAction(ISD::ConstantFP, MVT::f64, Legal); + + setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote); + setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote); + setOperationAction(ISD::UINT_TO_FP, MVT::i1, 
Promote); + setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote); + + setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote); + setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote); + setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote); + setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote); + + setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); + setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); + setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); + setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); + + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal); + setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal); + + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal); + setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal); + + setOperationAction(ISD::FABS, MVT::f32, Legal); + setOperationAction(ISD::FABS, MVT::f64, Expand); + + setOperationAction(ISD::FNEG, MVT::f32, Legal); + setOperationAction(ISD::FNEG, MVT::f64, Expand); + } else { + + // Expand fp<->uint. 
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); - setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf"); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); - setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3"); - setOperationAction(ISD::FADD, MVT::f64, Expand); + setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf"); + setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf"); - setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3"); - setOperationAction(ISD::FADD, MVT::f32, Expand); + setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf"); + setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf"); - setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3"); - setOperationAction(ISD::FADD, MVT::f32, Expand); + setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf"); + setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf"); - setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2"); - setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); + setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf"); + setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf"); - setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi"); - setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand); + setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi"); + setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi"); - setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi"); - setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand); + setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi"); + setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi"); - setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf"); - setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); + setLibcallName(RTLIB::FPTOUINT_F64_I32, 
"__hexagon_fixunsdfsi"); + setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi"); - setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2"); - setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); + setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3"); + setOperationAction(ISD::FADD, MVT::f64, Expand); - setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2"); - setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); + setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3"); + setOperationAction(ISD::FADD, MVT::f32, Expand); - setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2"); - setCondCodeAction(ISD::SETOGT, MVT::f32, Expand); + setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2"); + setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand); - setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2"); - setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); + setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2"); + setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); - setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2"); - setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); + setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2"); + setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); - setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2"); - setCondCodeAction(ISD::SETOLT, MVT::f64, Expand); + setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2"); + setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); - setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2"); - setCondCodeAction(ISD::SETOLT, MVT::f32, Expand); + setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2"); + setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); - setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3"); - setOperationAction(ISD::SREM, MVT::i32, Expand); + setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2"); + setCondCodeAction(ISD::SETOGT, MVT::f32, Expand); + + setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2"); + setCondCodeAction(ISD::SETOGT, MVT::f64, Expand); + + setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi"); + setOperationAction(ISD::FP_TO_SINT, 
MVT::f64, Expand); - setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3"); - setOperationAction(ISD::FMUL, MVT::f64, Expand); + setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi"); + setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand); - setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3"); - setOperationAction(ISD::MUL, MVT::f32, Expand); + setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2"); + setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); - setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2"); - setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); + setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2"); + setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); - setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2"); + setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2"); + setCondCodeAction(ISD::SETOLT, MVT::f64, Expand); + setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2"); + setCondCodeAction(ISD::SETOLT, MVT::f32, Expand); - setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3"); - setOperationAction(ISD::SUB, MVT::f64, Expand); + setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3"); + setOperationAction(ISD::FMUL, MVT::f64, Expand); - setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3"); - setOperationAction(ISD::SUB, MVT::f32, Expand); + setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3"); + setOperationAction(ISD::MUL, MVT::f32, Expand); - setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2"); - setOperationAction(ISD::FP_ROUND, MVT::f64, Expand); + setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2"); + setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); - setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2"); - setCondCodeAction(ISD::SETUO, MVT::f64, Expand); + setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2"); - setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2"); - setCondCodeAction(ISD::SETO, MVT::f64, Expand); + setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3"); + setOperationAction(ISD::SUB, MVT::f64, Expand); - setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2"); - 
setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); + setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3"); + setOperationAction(ISD::SUB, MVT::f32, Expand); - setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2"); - setCondCodeAction(ISD::SETO, MVT::f32, Expand); + setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2"); + setOperationAction(ISD::FP_ROUND, MVT::f64, Expand); - setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2"); - setCondCodeAction(ISD::SETUO, MVT::f32, Expand); + setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2"); + setCondCodeAction(ISD::SETUO, MVT::f64, Expand); + + setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2"); + setCondCodeAction(ISD::SETO, MVT::f64, Expand); + + setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2"); + setCondCodeAction(ISD::SETO, MVT::f32, Expand); + + setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2"); + setCondCodeAction(ISD::SETUO, MVT::f32, Expand); + + setOperationAction(ISD::FABS, MVT::f32, Expand); + setOperationAction(ISD::FABS, MVT::f64, Expand); + setOperationAction(ISD::FNEG, MVT::f32, Expand); + setOperationAction(ISD::FNEG, MVT::f64, Expand); + } + + setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3"); + setOperationAction(ISD::SREM, MVT::i32, Expand); setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal); setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal); @@ -1208,20 +1315,33 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine setOperationAction(ISD::BSWAP, MVT::i64, Expand); - // Expand fp<->uint. - setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); - setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); - - // Hexagon has no select or setcc: expand to SELECT_CC. - setOperationAction(ISD::SELECT, MVT::f32, Expand); - setOperationAction(ISD::SELECT, MVT::f64, Expand); - // Lower SELECT_CC to SETCC and SELECT. 
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); setOperationAction(ISD::SELECT_CC, MVT::i64, Custom); - // This is a workaround documented in DAGCombiner.cpp:2892 We don't - // support SELECT_CC on every type. - setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + if (QRI->Subtarget.hasV5TOps()) { + + // We need to make the operation type of SELECT node to be Custom, + // such that we don't go into the infinite loop of + // select -> setcc -> select_cc -> select loop. + setOperationAction(ISD::SELECT, MVT::f32, Custom); + setOperationAction(ISD::SELECT, MVT::f64, Custom); + + setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); + setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); + setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + } else { + + // Hexagon has no select or setcc: expand to SELECT_CC. + setOperationAction(ISD::SELECT, MVT::f32, Expand); + setOperationAction(ISD::SELECT, MVT::f64, Expand); + + // This is a workaround documented in DAGCombiner.cpp:2892 We don't + // support SELECT_CC on every type. 
+ setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + + } setOperationAction(ISD::BR_CC, MVT::Other, Expand); setOperationAction(ISD::BRIND, MVT::Other, Expand); @@ -1307,22 +1427,22 @@ const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { default: return 0; - case HexagonISD::CONST32: return "HexagonISD::CONST32"; + case HexagonISD::CONST32: return "HexagonISD::CONST32"; case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC"; - case HexagonISD::CMPICC: return "HexagonISD::CMPICC"; - case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC"; - case HexagonISD::BRICC: return "HexagonISD::BRICC"; - case HexagonISD::BRFCC: return "HexagonISD::BRFCC"; - case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC"; - case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC"; - case HexagonISD::Hi: return "HexagonISD::Hi"; - case HexagonISD::Lo: return "HexagonISD::Lo"; - case HexagonISD::FTOI: return "HexagonISD::FTOI"; - case HexagonISD::ITOF: return "HexagonISD::ITOF"; - case HexagonISD::CALL: return "HexagonISD::CALL"; - case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG"; - case HexagonISD::BR_JT: return "HexagonISD::BR_JT"; - case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN"; + case HexagonISD::CMPICC: return "HexagonISD::CMPICC"; + case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC"; + case HexagonISD::BRICC: return "HexagonISD::BRICC"; + case HexagonISD::BRFCC: return "HexagonISD::BRFCC"; + case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC"; + case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC"; + case HexagonISD::Hi: return "HexagonISD::Hi"; + case HexagonISD::Lo: return "HexagonISD::Lo"; + case HexagonISD::FTOI: return "HexagonISD::FTOI"; + case HexagonISD::ITOF: return "HexagonISD::ITOF"; + case HexagonISD::CALL: return "HexagonISD::CALL"; + case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG"; + case HexagonISD::BR_JT: return "HexagonISD::BR_JT"; + case 
HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN"; } } @@ -1347,9 +1467,10 @@ SDValue HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { default: llvm_unreachable("Should not custom lower this!"); + case ISD::ConstantPool: return LowerConstantPool(Op, DAG); // Frame & Return address. Currently unimplemented. - case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); - case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); + case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); + case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); case ISD::GlobalTLSAddress: llvm_unreachable("TLS not implemented for Hexagon."); case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG); @@ -1359,9 +1480,10 @@ HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::BR_JT: return LowerBR_JT(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); - case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); + case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); + case ISD::SELECT: return Op; case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); - case ISD::INLINEASM: return LowerINLINEASM(Op, DAG); + case ISD::INLINEASM: return LowerINLINEASM(Op, DAG); } } @@ -1404,9 +1526,11 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const case MVT::i32: case MVT::i16: case MVT::i8: - return std::make_pair(0U, Hexagon::IntRegsRegisterClass); + case MVT::f32: + return std::make_pair(0U, &Hexagon::IntRegsRegClass); case MVT::i64: - return std::make_pair(0U, Hexagon::DoubleRegsRegisterClass); + case MVT::f64: + return std::make_pair(0U, &Hexagon::DoubleRegsRegClass); } default: llvm_unreachable("Unknown asm register class"); @@ -1416,6 +1540,14 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(const return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } +/// isFPImmLegal - Returns true if the target can instruction select the +/// specified FP immediate 
natively. If false, the legalizer will +/// materialize the FP immediate as a load from a constant pool. +bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { + const HexagonRegisterInfo* QRI = TM.getRegisterInfo(); + return QRI->Subtarget.hasV5TOps(); +} + /// isLegalAddressingMode - Return true if the addressing mode represented by /// AM is legal for this target, for a load/store of the specified type. bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM, diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h index 4208bcb..fe6c905 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.h +++ b/lib/Target/Hexagon/HexagonISelLowering.h @@ -27,6 +27,7 @@ namespace llvm { CONST32, CONST32_GP, // For marking data present in GP. + FCONST32, SETCC, ADJDYNALLOC, ARGEXTEND, @@ -48,6 +49,7 @@ namespace llvm { BR_JT, // Jump table. BARRIER, // Memory barrier. WrapperJT, + WrapperCP, TC_RETURN }; } @@ -94,13 +96,7 @@ namespace llvm { SmallVectorImpl<SDValue> &InVals) const; SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerCall(SDValue Chain, SDValue Callee, - CallingConv::ID CallConv, bool isVarArg, - bool doesNotRet, bool &isTailCall, - const SmallVectorImpl<ISD::OutputArg> &Outs, - const SmallVectorImpl<SDValue> &OutVals, - const SmallVectorImpl<ISD::InputArg> &Ins, - DebugLoc dl, SelectionDAG &DAG, + SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const; SDValue LowerCallResult(SDValue Chain, SDValue InFlag, @@ -128,6 +124,7 @@ namespace llvm { MachineBasicBlock *BB) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; virtual EVT getSetCCResultType(EVT VT) const { return MVT::i1; } @@ -150,6 +147,7 @@ namespace llvm { /// mode is legal for a load/store of any legal type. /// TODO: Handle pre/postinc as well. 
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const; + virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const; /// isLegalICmpImmediate - Return true if the specified immediate is legal /// icmp immediate, that is the target has icmp instructions which can diff --git a/lib/Target/Hexagon/HexagonImmediates.td b/lib/Target/Hexagon/HexagonImmediates.td index e78bb79..18692c4 100644 --- a/lib/Target/Hexagon/HexagonImmediates.td +++ b/lib/Target/Hexagon/HexagonImmediates.td @@ -371,7 +371,7 @@ def s4_3ImmPred : PatLeaf<(i32 imm), [{ def u64ImmPred : PatLeaf<(i64 imm), [{ // immS16 predicate - True if the immediate fits in a 16-bit sign extended // field. - // Adding "N ||" to supress gcc unused warning. + // Adding "N ||" to suppress gcc unused warning. return (N || true); }]>; diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td index c9f16fb..e472d49 100644 --- a/lib/Target/Hexagon/HexagonInstrFormats.td +++ b/lib/Target/Hexagon/HexagonInstrFormats.td @@ -13,29 +13,48 @@ // *** Must match HexagonBaseInfo.h *** //===----------------------------------------------------------------------===// +class Type<bits<5> t> { + bits<5> Value = t; +} +def TypePSEUDO : Type<0>; +def TypeALU32 : Type<1>; +def TypeCR : Type<2>; +def TypeJR : Type<3>; +def TypeJ : Type<4>; +def TypeLD : Type<5>; +def TypeST : Type<6>; +def TypeSYSTEM : Type<7>; +def TypeXTYPE : Type<8>; +def TypeMARKER : Type<31>; //===----------------------------------------------------------------------===// // Intruction Class Declaration + //===----------------------------------------------------------------------===// class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr, InstrItinClass itin> : Instruction { + string cstr, InstrItinClass itin, Type type> : Instruction { field bits<32> Inst; let Namespace = "Hexagon"; dag OutOperandList = outs; dag InOperandList = ins; - let AsmString = asmstr; + let AsmString = 
asmstr; let Pattern = pattern; let Constraints = cstr; - let Itinerary = itin; - - // *** The code below must match HexagonBaseInfo.h *** - + let Itinerary = itin; + let Size = 4; + + // *** Must match HexagonBaseInfo.h *** + // Instruction type according to the ISA. + Type HexagonType = type; + let TSFlags{4-0} = HexagonType.Value; + // Solo instructions, i.e., those that cannot be in a packet with others. + bits<1> isHexagonSolo = 0; + let TSFlags{5} = isHexagonSolo; // Predicated instructions. bits<1> isPredicated = 0; - let TSFlags{1} = isPredicated; + let TSFlags{6} = isPredicated; // *** The code above must match HexagonBaseInfo.h *** } @@ -47,17 +66,25 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern, // LD Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", LD> { + : InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> { bits<5> rd; bits<5> rs; bits<13> imm13; } +class LDInst2<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> { + bits<5> rd; + bits<5> rs; + bits<13> imm13; + let mayLoad = 1; +} + // LD Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, LD> { + : InstHexagon<outs, ins, asmstr, pattern, cstr, LD, TypeLD> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -68,7 +95,24 @@ class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern, // ST Instruction Class in V4 can take SLOT0 & SLOT1. // Definition of the instruction class CHANGED from V2/V3 to V4. 
class STInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", ST> { + : InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> { + bits<5> rd; + bits<5> rs; + bits<13> imm13; +} + +class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> { + bits<5> rd; + bits<5> rs; + bits<13> imm13; + let mayStore = 1; +} + +// SYSTEM Instruction Class in V4 can take SLOT0 only +// In V2/V3 we used ST for this but in v4 ST can take SLOT0 or SLOT1. +class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstHexagon<outs, ins, asmstr, pattern, "", SYS, TypeSYSTEM> { bits<5> rd; bits<5> rs; bits<13> imm13; @@ -79,7 +123,7 @@ class STInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class CHANGED from V2/V3 to V4. class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, ST> { + : InstHexagon<outs, ins, asmstr, pattern, cstr, ST, TypeST> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -89,7 +133,7 @@ class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern, // ALU32 Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", ALU32> { + : InstHexagon<outs, ins, asmstr, pattern, "", ALU32, TypeALU32> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -102,7 +146,17 @@ class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4. 
class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", ALU64> { + : InstHexagon<outs, ins, asmstr, pattern, "", ALU64, TypeXTYPE> { + bits<5> rd; + bits<5> rs; + bits<5> rt; + bits<16> imm16; + bits<16> imm16_2; +} + +class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern, + string cstr> + : InstHexagon<outs, ins, asmstr, pattern, cstr, ALU64, TypeXTYPE> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -115,7 +169,7 @@ class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4. class MInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", M> { + : InstHexagon<outs, ins, asmstr, pattern, "", M, TypeXTYPE> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -126,8 +180,8 @@ class MInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4. class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, M> { + string cstr> + : InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -138,9 +192,7 @@ class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4. 
class SInst<dag outs, dag ins, string asmstr, list<dag> pattern> -//: InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, M)> { - : InstHexagon<outs, ins, asmstr, pattern, "", S> { -// : InstHexagon<outs, ins, asmstr, pattern, "", S> { + : InstHexagon<outs, ins, asmstr, pattern, "", S, TypeXTYPE> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -151,8 +203,8 @@ class SInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4. class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, S> { + string cstr> + : InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE> { // : InstHexagon<outs, ins, asmstr, pattern, cstr, S> { // : InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, S)> { bits<5> rd; @@ -163,14 +215,14 @@ class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, // J Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class JType<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", J> { + : InstHexagon<outs, ins, asmstr, pattern, "", J, TypeJ> { bits<16> imm16; } // JR Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class JRType<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", JR> { + : InstHexagon<outs, ins, asmstr, pattern, "", JR, TypeJR> { bits<5> rs; bits<5> pu; // Predicate register } @@ -178,15 +230,22 @@ class JRType<dag outs, dag ins, string asmstr, list<dag> pattern> // CR Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. 
class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", CR> { + : InstHexagon<outs, ins, asmstr, pattern, "", CR, TypeCR> { bits<5> rs; bits<10> imm10; } +class Marker<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstHexagon<outs, ins, asmstr, pattern, "", MARKER, TypeMARKER> { + let isCodeGenOnly = 1; + let isPseudo = 1; +} class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO>; - + : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO, TypePSEUDO> { + let isCodeGenOnly = 1; + let isPseudo = 1; +} //===----------------------------------------------------------------------===// // Intruction Classes Definitions - @@ -222,6 +281,11 @@ class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern> : ALU64Type<outs, ins, asmstr, pattern> { } +class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern> + : ALU64Type<outs, ins, asmstr, pattern> { + let rt{0-4} = 0; +} + // J Type Instructions. class JInst<dag outs, dag ins, string asmstr, list<dag> pattern> : JType<outs, ins, asmstr, pattern> { @@ -234,15 +298,31 @@ class JRInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Post increment ST Instruction. -class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> +class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, + string cstr> + : STInstPost<outs, ins, asmstr, pattern, cstr> { + let rt{0-4} = 0; +} + +class STInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern, + string cstr> : STInstPost<outs, ins, asmstr, pattern, cstr> { let rt{0-4} = 0; + let mayStore = 1; } // Post increment LD Instruction. 
-class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> +class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, + string cstr> + : LDInstPost<outs, ins, asmstr, pattern, cstr> { + let rt{0-4} = 0; +} + +class LDInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern, + string cstr> : LDInstPost<outs, ins, asmstr, pattern, cstr> { let rt{0-4} = 0; + let mayLoad = 1; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/lib/Target/Hexagon/HexagonInstrFormatsV4.td index bd5e449..49741a3 100644 --- a/lib/Target/Hexagon/HexagonInstrFormatsV4.td +++ b/lib/Target/Hexagon/HexagonInstrFormatsV4.td @@ -11,11 +11,25 @@ // //===----------------------------------------------------------------------===// +//----------------------------------------------------------------------------// +// Hexagon Intruction Flags + +// +// *** Must match BaseInfo.h *** +//----------------------------------------------------------------------------// + +def TypeMEMOP : Type<9>; +def TypeNV : Type<10>; +def TypePREFIX : Type<30>; + +//----------------------------------------------------------------------------// +// Intruction Classes Definitions + +//----------------------------------------------------------------------------// + // // NV type instructions. // class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4> { + : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4, TypeNV> { bits<5> rd; bits<5> rs; bits<13> imm13; @@ -24,7 +38,7 @@ class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of Post increment new value store. 
class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4> { + : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4, TypeNV> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -39,8 +53,15 @@ class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern, } class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4> { + : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4, TypeMEMOP> { bits<5> rd; bits<5> rs; bits<6> imm6; } + +class Immext<dag outs, dag ins, string asmstr, list<dag> pattern> + : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypePREFIX> { + let isCodeGenOnly = 1; + + bits<26> imm26; +} diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 77b3663..c8f933d 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -11,10 +11,10 @@ // //===----------------------------------------------------------------------===// -#include "Hexagon.h" #include "HexagonInstrInfo.h" #include "HexagonRegisterInfo.h" #include "HexagonSubtarget.h" +#include "Hexagon.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/CodeGen/DFAPacketizer.h" @@ -34,24 +34,23 @@ using namespace llvm; /// Constants for Hexagon instructions. 
/// const int Hexagon_MEMW_OFFSET_MAX = 4095; -const int Hexagon_MEMW_OFFSET_MIN = 4096; +const int Hexagon_MEMW_OFFSET_MIN = -4096; const int Hexagon_MEMD_OFFSET_MAX = 8191; -const int Hexagon_MEMD_OFFSET_MIN = 8192; +const int Hexagon_MEMD_OFFSET_MIN = -8192; const int Hexagon_MEMH_OFFSET_MAX = 2047; -const int Hexagon_MEMH_OFFSET_MIN = 2048; +const int Hexagon_MEMH_OFFSET_MIN = -2048; const int Hexagon_MEMB_OFFSET_MAX = 1023; -const int Hexagon_MEMB_OFFSET_MIN = 1024; +const int Hexagon_MEMB_OFFSET_MIN = -1024; const int Hexagon_ADDI_OFFSET_MAX = 32767; -const int Hexagon_ADDI_OFFSET_MIN = 32768; +const int Hexagon_ADDI_OFFSET_MIN = -32768; const int Hexagon_MEMD_AUTOINC_MAX = 56; -const int Hexagon_MEMD_AUTOINC_MIN = 64; +const int Hexagon_MEMD_AUTOINC_MIN = -64; const int Hexagon_MEMW_AUTOINC_MAX = 28; -const int Hexagon_MEMW_AUTOINC_MIN = 32; +const int Hexagon_MEMW_AUTOINC_MIN = -32; const int Hexagon_MEMH_AUTOINC_MAX = 14; -const int Hexagon_MEMH_AUTOINC_MIN = 16; +const int Hexagon_MEMH_AUTOINC_MIN = -16; const int Hexagon_MEMB_AUTOINC_MAX = 7; -const int Hexagon_MEMB_AUTOINC_MIN = 8; - +const int Hexagon_MEMB_AUTOINC_MIN = -8; HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST) @@ -70,6 +69,7 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, switch (MI->getOpcode()) { + default: break; case Hexagon::LDriw: case Hexagon::LDrid: case Hexagon::LDrih: @@ -81,11 +81,7 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, return MI->getOperand(0).getReg(); } break; - - default: - break; } - return 0; } @@ -98,21 +94,18 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const { switch (MI->getOpcode()) { + default: break; case Hexagon::STriw: case Hexagon::STrid: case Hexagon::STrih: case Hexagon::STrib: if (MI->getOperand(2).isFI() && MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) { - 
FrameIndex = MI->getOperand(2).getIndex(); - return MI->getOperand(0).getReg(); + FrameIndex = MI->getOperand(0).getIndex(); + return MI->getOperand(2).getReg(); } break; - - default: - break; } - return 0; } @@ -176,6 +169,7 @@ bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&FBB, SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const { + TBB = NULL; FBB = NULL; // If the block has no terminators, it just falls into the block after it. @@ -328,7 +322,8 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB, DestReg).addReg(SrcReg).addReg(SrcReg); return; } - if (Hexagon::DoubleRegsRegClass.contains(DestReg, SrcReg)) { + if (Hexagon::DoubleRegsRegClass.contains(DestReg) && + Hexagon::IntRegsRegClass.contains(SrcReg)) { // We can have an overlap between single and double reg: r1:0 = r0. if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) { // r1:0 = r0 @@ -343,7 +338,8 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB, } return; } - if (Hexagon::CRRegsRegClass.contains(DestReg, SrcReg)) { + if (Hexagon::CRRegsRegClass.contains(DestReg) && + Hexagon::IntRegsRegClass.contains(SrcReg)) { BuildMI(MBB, I, DL, get(Hexagon::TFCR), DestReg).addReg(SrcReg); return; } @@ -370,15 +366,15 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MFI.getObjectSize(FI), Align); - if (Hexagon::IntRegsRegisterClass->hasSubClassEq(RC)) { + if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(Hexagon::STriw)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); - } else if (Hexagon::DoubleRegsRegisterClass->hasSubClassEq(RC)) { + } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) { BuildMI(MBB, I, DL, get(Hexagon::STrid)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); - } else if (Hexagon::PredRegsRegisterClass->hasSubClassEq(RC)) { + } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) { 
BuildMI(MBB, I, DL, get(Hexagon::STriw_pred)) .addFrameIndex(FI).addImm(0) .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO); @@ -415,14 +411,13 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align); - - if (RC == Hexagon::IntRegsRegisterClass) { + if (RC == &Hexagon::IntRegsRegClass) { BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); - } else if (RC == Hexagon::DoubleRegsRegisterClass) { + } else if (RC == &Hexagon::DoubleRegsRegClass) { BuildMI(MBB, I, DL, get(Hexagon::LDrid), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); - } else if (RC == Hexagon::PredRegsRegisterClass) { + } else if (RC == &Hexagon::PredRegsRegClass) { BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } else { @@ -453,11 +448,11 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { MachineRegisterInfo &RegInfo = MF->getRegInfo(); const TargetRegisterClass *TRC; if (VT == MVT::i1) { - TRC = Hexagon::PredRegsRegisterClass; - } else if (VT == MVT::i32) { - TRC = Hexagon::IntRegsRegisterClass; - } else if (VT == MVT::i64) { - TRC = Hexagon::DoubleRegsRegisterClass; + TRC = &Hexagon::PredRegsRegClass; + } else if (VT == MVT::i32 || VT == MVT::f32) { + TRC = &Hexagon::IntRegsRegClass; + } else if (VT == MVT::i64 || VT == MVT::f64) { + TRC = &Hexagon::DoubleRegsRegClass; } else { llvm_unreachable("Cannot handle this register class"); } @@ -466,7 +461,852 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { return NewReg; } +bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const { + switch(MI->getOpcode()) { + default: return false; + // JMP_EQri + case Hexagon::JMP_EQriPt_nv_V4: + case Hexagon::JMP_EQriPnt_nv_V4: + case Hexagon::JMP_EQriNotPt_nv_V4: + case Hexagon::JMP_EQriNotPnt_nv_V4: + + // JMP_EQri - with -1 + case 
Hexagon::JMP_EQriPtneg_nv_V4: + case Hexagon::JMP_EQriPntneg_nv_V4: + case Hexagon::JMP_EQriNotPtneg_nv_V4: + case Hexagon::JMP_EQriNotPntneg_nv_V4: + + // JMP_EQrr + case Hexagon::JMP_EQrrPt_nv_V4: + case Hexagon::JMP_EQrrPnt_nv_V4: + case Hexagon::JMP_EQrrNotPt_nv_V4: + case Hexagon::JMP_EQrrNotPnt_nv_V4: + + // JMP_GTri + case Hexagon::JMP_GTriPt_nv_V4: + case Hexagon::JMP_GTriPnt_nv_V4: + case Hexagon::JMP_GTriNotPt_nv_V4: + case Hexagon::JMP_GTriNotPnt_nv_V4: + + // JMP_GTri - with -1 + case Hexagon::JMP_GTriPtneg_nv_V4: + case Hexagon::JMP_GTriPntneg_nv_V4: + case Hexagon::JMP_GTriNotPtneg_nv_V4: + case Hexagon::JMP_GTriNotPntneg_nv_V4: + // JMP_GTrr + case Hexagon::JMP_GTrrPt_nv_V4: + case Hexagon::JMP_GTrrPnt_nv_V4: + case Hexagon::JMP_GTrrNotPt_nv_V4: + case Hexagon::JMP_GTrrNotPnt_nv_V4: + + // JMP_GTrrdn + case Hexagon::JMP_GTrrdnPt_nv_V4: + case Hexagon::JMP_GTrrdnPnt_nv_V4: + case Hexagon::JMP_GTrrdnNotPt_nv_V4: + case Hexagon::JMP_GTrrdnNotPnt_nv_V4: + + // JMP_GTUri + case Hexagon::JMP_GTUriPt_nv_V4: + case Hexagon::JMP_GTUriPnt_nv_V4: + case Hexagon::JMP_GTUriNotPt_nv_V4: + case Hexagon::JMP_GTUriNotPnt_nv_V4: + + // JMP_GTUrr + case Hexagon::JMP_GTUrrPt_nv_V4: + case Hexagon::JMP_GTUrrPnt_nv_V4: + case Hexagon::JMP_GTUrrNotPt_nv_V4: + case Hexagon::JMP_GTUrrNotPnt_nv_V4: + + // JMP_GTUrrdn + case Hexagon::JMP_GTUrrdnPt_nv_V4: + case Hexagon::JMP_GTUrrdnPnt_nv_V4: + case Hexagon::JMP_GTUrrdnNotPt_nv_V4: + case Hexagon::JMP_GTUrrdnNotPnt_nv_V4: + + // TFR_FI + case Hexagon::TFR_FI: + return true; + } +} + +bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const { + switch(MI->getOpcode()) { + default: return false; + // JMP_EQri + case Hexagon::JMP_EQriPt_ie_nv_V4: + case Hexagon::JMP_EQriPnt_ie_nv_V4: + case Hexagon::JMP_EQriNotPt_ie_nv_V4: + case Hexagon::JMP_EQriNotPnt_ie_nv_V4: + + // JMP_EQri - with -1 + case Hexagon::JMP_EQriPtneg_ie_nv_V4: + case Hexagon::JMP_EQriPntneg_ie_nv_V4: + case Hexagon::JMP_EQriNotPtneg_ie_nv_V4: + case 
Hexagon::JMP_EQriNotPntneg_ie_nv_V4: + + // JMP_EQrr + case Hexagon::JMP_EQrrPt_ie_nv_V4: + case Hexagon::JMP_EQrrPnt_ie_nv_V4: + case Hexagon::JMP_EQrrNotPt_ie_nv_V4: + case Hexagon::JMP_EQrrNotPnt_ie_nv_V4: + + // JMP_GTri + case Hexagon::JMP_GTriPt_ie_nv_V4: + case Hexagon::JMP_GTriPnt_ie_nv_V4: + case Hexagon::JMP_GTriNotPt_ie_nv_V4: + case Hexagon::JMP_GTriNotPnt_ie_nv_V4: + + // JMP_GTri - with -1 + case Hexagon::JMP_GTriPtneg_ie_nv_V4: + case Hexagon::JMP_GTriPntneg_ie_nv_V4: + case Hexagon::JMP_GTriNotPtneg_ie_nv_V4: + case Hexagon::JMP_GTriNotPntneg_ie_nv_V4: + + // JMP_GTrr + case Hexagon::JMP_GTrrPt_ie_nv_V4: + case Hexagon::JMP_GTrrPnt_ie_nv_V4: + case Hexagon::JMP_GTrrNotPt_ie_nv_V4: + case Hexagon::JMP_GTrrNotPnt_ie_nv_V4: + + // JMP_GTrrdn + case Hexagon::JMP_GTrrdnPt_ie_nv_V4: + case Hexagon::JMP_GTrrdnPnt_ie_nv_V4: + case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4: + case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4: + + // JMP_GTUri + case Hexagon::JMP_GTUriPt_ie_nv_V4: + case Hexagon::JMP_GTUriPnt_ie_nv_V4: + case Hexagon::JMP_GTUriNotPt_ie_nv_V4: + case Hexagon::JMP_GTUriNotPnt_ie_nv_V4: + + // JMP_GTUrr + case Hexagon::JMP_GTUrrPt_ie_nv_V4: + case Hexagon::JMP_GTUrrPnt_ie_nv_V4: + case Hexagon::JMP_GTUrrNotPt_ie_nv_V4: + case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4: + + // JMP_GTUrrdn + case Hexagon::JMP_GTUrrdnPt_ie_nv_V4: + case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4: + case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4: + case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4: + + // V4 absolute set addressing. + case Hexagon::LDrid_abs_setimm_V4: + case Hexagon::LDriw_abs_setimm_V4: + case Hexagon::LDrih_abs_setimm_V4: + case Hexagon::LDrib_abs_setimm_V4: + case Hexagon::LDriuh_abs_setimm_V4: + case Hexagon::LDriub_abs_setimm_V4: + + case Hexagon::STrid_abs_setimm_V4: + case Hexagon::STrib_abs_setimm_V4: + case Hexagon::STrih_abs_setimm_V4: + case Hexagon::STriw_abs_setimm_V4: + + // V4 global address load. 
+ case Hexagon::LDrid_GP_cPt_V4 : + case Hexagon::LDrid_GP_cNotPt_V4 : + case Hexagon::LDrid_GP_cdnPt_V4 : + case Hexagon::LDrid_GP_cdnNotPt_V4 : + case Hexagon::LDrib_GP_cPt_V4 : + case Hexagon::LDrib_GP_cNotPt_V4 : + case Hexagon::LDrib_GP_cdnPt_V4 : + case Hexagon::LDrib_GP_cdnNotPt_V4 : + case Hexagon::LDriub_GP_cPt_V4 : + case Hexagon::LDriub_GP_cNotPt_V4 : + case Hexagon::LDriub_GP_cdnPt_V4 : + case Hexagon::LDriub_GP_cdnNotPt_V4 : + case Hexagon::LDrih_GP_cPt_V4 : + case Hexagon::LDrih_GP_cNotPt_V4 : + case Hexagon::LDrih_GP_cdnPt_V4 : + case Hexagon::LDrih_GP_cdnNotPt_V4 : + case Hexagon::LDriuh_GP_cPt_V4 : + case Hexagon::LDriuh_GP_cNotPt_V4 : + case Hexagon::LDriuh_GP_cdnPt_V4 : + case Hexagon::LDriuh_GP_cdnNotPt_V4 : + case Hexagon::LDriw_GP_cPt_V4 : + case Hexagon::LDriw_GP_cNotPt_V4 : + case Hexagon::LDriw_GP_cdnPt_V4 : + case Hexagon::LDriw_GP_cdnNotPt_V4 : + case Hexagon::LDd_GP_cPt_V4 : + case Hexagon::LDd_GP_cNotPt_V4 : + case Hexagon::LDd_GP_cdnPt_V4 : + case Hexagon::LDd_GP_cdnNotPt_V4 : + case Hexagon::LDb_GP_cPt_V4 : + case Hexagon::LDb_GP_cNotPt_V4 : + case Hexagon::LDb_GP_cdnPt_V4 : + case Hexagon::LDb_GP_cdnNotPt_V4 : + case Hexagon::LDub_GP_cPt_V4 : + case Hexagon::LDub_GP_cNotPt_V4 : + case Hexagon::LDub_GP_cdnPt_V4 : + case Hexagon::LDub_GP_cdnNotPt_V4 : + case Hexagon::LDh_GP_cPt_V4 : + case Hexagon::LDh_GP_cNotPt_V4 : + case Hexagon::LDh_GP_cdnPt_V4 : + case Hexagon::LDh_GP_cdnNotPt_V4 : + case Hexagon::LDuh_GP_cPt_V4 : + case Hexagon::LDuh_GP_cNotPt_V4 : + case Hexagon::LDuh_GP_cdnPt_V4 : + case Hexagon::LDuh_GP_cdnNotPt_V4 : + case Hexagon::LDw_GP_cPt_V4 : + case Hexagon::LDw_GP_cNotPt_V4 : + case Hexagon::LDw_GP_cdnPt_V4 : + case Hexagon::LDw_GP_cdnNotPt_V4 : + + // V4 global address store. 
+ case Hexagon::STrid_GP_cPt_V4 : + case Hexagon::STrid_GP_cNotPt_V4 : + case Hexagon::STrid_GP_cdnPt_V4 : + case Hexagon::STrid_GP_cdnNotPt_V4 : + case Hexagon::STrib_GP_cPt_V4 : + case Hexagon::STrib_GP_cNotPt_V4 : + case Hexagon::STrib_GP_cdnPt_V4 : + case Hexagon::STrib_GP_cdnNotPt_V4 : + case Hexagon::STrih_GP_cPt_V4 : + case Hexagon::STrih_GP_cNotPt_V4 : + case Hexagon::STrih_GP_cdnPt_V4 : + case Hexagon::STrih_GP_cdnNotPt_V4 : + case Hexagon::STriw_GP_cPt_V4 : + case Hexagon::STriw_GP_cNotPt_V4 : + case Hexagon::STriw_GP_cdnPt_V4 : + case Hexagon::STriw_GP_cdnNotPt_V4 : + case Hexagon::STd_GP_cPt_V4 : + case Hexagon::STd_GP_cNotPt_V4 : + case Hexagon::STd_GP_cdnPt_V4 : + case Hexagon::STd_GP_cdnNotPt_V4 : + case Hexagon::STb_GP_cPt_V4 : + case Hexagon::STb_GP_cNotPt_V4 : + case Hexagon::STb_GP_cdnPt_V4 : + case Hexagon::STb_GP_cdnNotPt_V4 : + case Hexagon::STh_GP_cPt_V4 : + case Hexagon::STh_GP_cNotPt_V4 : + case Hexagon::STh_GP_cdnPt_V4 : + case Hexagon::STh_GP_cdnNotPt_V4 : + case Hexagon::STw_GP_cPt_V4 : + case Hexagon::STw_GP_cNotPt_V4 : + case Hexagon::STw_GP_cdnPt_V4 : + case Hexagon::STw_GP_cdnNotPt_V4 : + + // V4 predicated global address new value store. 
+ case Hexagon::STrib_GP_cPt_nv_V4 : + case Hexagon::STrib_GP_cNotPt_nv_V4 : + case Hexagon::STrib_GP_cdnPt_nv_V4 : + case Hexagon::STrib_GP_cdnNotPt_nv_V4 : + case Hexagon::STrih_GP_cPt_nv_V4 : + case Hexagon::STrih_GP_cNotPt_nv_V4 : + case Hexagon::STrih_GP_cdnPt_nv_V4 : + case Hexagon::STrih_GP_cdnNotPt_nv_V4 : + case Hexagon::STriw_GP_cPt_nv_V4 : + case Hexagon::STriw_GP_cNotPt_nv_V4 : + case Hexagon::STriw_GP_cdnPt_nv_V4 : + case Hexagon::STriw_GP_cdnNotPt_nv_V4 : + case Hexagon::STb_GP_cPt_nv_V4 : + case Hexagon::STb_GP_cNotPt_nv_V4 : + case Hexagon::STb_GP_cdnPt_nv_V4 : + case Hexagon::STb_GP_cdnNotPt_nv_V4 : + case Hexagon::STh_GP_cPt_nv_V4 : + case Hexagon::STh_GP_cNotPt_nv_V4 : + case Hexagon::STh_GP_cdnPt_nv_V4 : + case Hexagon::STh_GP_cdnNotPt_nv_V4 : + case Hexagon::STw_GP_cPt_nv_V4 : + case Hexagon::STw_GP_cNotPt_nv_V4 : + case Hexagon::STw_GP_cdnPt_nv_V4 : + case Hexagon::STw_GP_cdnNotPt_nv_V4 : + + // TFR_FI + case Hexagon::TFR_FI_immext_V4: + + // TFRI_F + case Hexagon::TFRI_f: + case Hexagon::TFRI_cPt_f: + case Hexagon::TFRI_cNotPt_f: + case Hexagon::CONST64_Float_Real: + return true; + } +} + +bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const { + switch (MI->getOpcode()) { + default: return false; + // JMP_EQri + case Hexagon::JMP_EQriPt_nv_V4: + case Hexagon::JMP_EQriPnt_nv_V4: + case Hexagon::JMP_EQriNotPt_nv_V4: + case Hexagon::JMP_EQriNotPnt_nv_V4: + case Hexagon::JMP_EQriPt_ie_nv_V4: + case Hexagon::JMP_EQriPnt_ie_nv_V4: + case Hexagon::JMP_EQriNotPt_ie_nv_V4: + case Hexagon::JMP_EQriNotPnt_ie_nv_V4: + + // JMP_EQri - with -1 + case Hexagon::JMP_EQriPtneg_nv_V4: + case Hexagon::JMP_EQriPntneg_nv_V4: + case Hexagon::JMP_EQriNotPtneg_nv_V4: + case Hexagon::JMP_EQriNotPntneg_nv_V4: + case Hexagon::JMP_EQriPtneg_ie_nv_V4: + case Hexagon::JMP_EQriPntneg_ie_nv_V4: + case Hexagon::JMP_EQriNotPtneg_ie_nv_V4: + case Hexagon::JMP_EQriNotPntneg_ie_nv_V4: + + // JMP_EQrr + case Hexagon::JMP_EQrrPt_nv_V4: + case 
Hexagon::JMP_EQrrPnt_nv_V4: + case Hexagon::JMP_EQrrNotPt_nv_V4: + case Hexagon::JMP_EQrrNotPnt_nv_V4: + case Hexagon::JMP_EQrrPt_ie_nv_V4: + case Hexagon::JMP_EQrrPnt_ie_nv_V4: + case Hexagon::JMP_EQrrNotPt_ie_nv_V4: + case Hexagon::JMP_EQrrNotPnt_ie_nv_V4: + + // JMP_GTri + case Hexagon::JMP_GTriPt_nv_V4: + case Hexagon::JMP_GTriPnt_nv_V4: + case Hexagon::JMP_GTriNotPt_nv_V4: + case Hexagon::JMP_GTriNotPnt_nv_V4: + case Hexagon::JMP_GTriPt_ie_nv_V4: + case Hexagon::JMP_GTriPnt_ie_nv_V4: + case Hexagon::JMP_GTriNotPt_ie_nv_V4: + case Hexagon::JMP_GTriNotPnt_ie_nv_V4: + + // JMP_GTri - with -1 + case Hexagon::JMP_GTriPtneg_nv_V4: + case Hexagon::JMP_GTriPntneg_nv_V4: + case Hexagon::JMP_GTriNotPtneg_nv_V4: + case Hexagon::JMP_GTriNotPntneg_nv_V4: + case Hexagon::JMP_GTriPtneg_ie_nv_V4: + case Hexagon::JMP_GTriPntneg_ie_nv_V4: + case Hexagon::JMP_GTriNotPtneg_ie_nv_V4: + case Hexagon::JMP_GTriNotPntneg_ie_nv_V4: + + // JMP_GTrr + case Hexagon::JMP_GTrrPt_nv_V4: + case Hexagon::JMP_GTrrPnt_nv_V4: + case Hexagon::JMP_GTrrNotPt_nv_V4: + case Hexagon::JMP_GTrrNotPnt_nv_V4: + case Hexagon::JMP_GTrrPt_ie_nv_V4: + case Hexagon::JMP_GTrrPnt_ie_nv_V4: + case Hexagon::JMP_GTrrNotPt_ie_nv_V4: + case Hexagon::JMP_GTrrNotPnt_ie_nv_V4: + + // JMP_GTrrdn + case Hexagon::JMP_GTrrdnPt_nv_V4: + case Hexagon::JMP_GTrrdnPnt_nv_V4: + case Hexagon::JMP_GTrrdnNotPt_nv_V4: + case Hexagon::JMP_GTrrdnNotPnt_nv_V4: + case Hexagon::JMP_GTrrdnPt_ie_nv_V4: + case Hexagon::JMP_GTrrdnPnt_ie_nv_V4: + case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4: + case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4: + + // JMP_GTUri + case Hexagon::JMP_GTUriPt_nv_V4: + case Hexagon::JMP_GTUriPnt_nv_V4: + case Hexagon::JMP_GTUriNotPt_nv_V4: + case Hexagon::JMP_GTUriNotPnt_nv_V4: + case Hexagon::JMP_GTUriPt_ie_nv_V4: + case Hexagon::JMP_GTUriPnt_ie_nv_V4: + case Hexagon::JMP_GTUriNotPt_ie_nv_V4: + case Hexagon::JMP_GTUriNotPnt_ie_nv_V4: + + // JMP_GTUrr + case Hexagon::JMP_GTUrrPt_nv_V4: + case Hexagon::JMP_GTUrrPnt_nv_V4: + case 
Hexagon::JMP_GTUrrNotPt_nv_V4: + case Hexagon::JMP_GTUrrNotPnt_nv_V4: + case Hexagon::JMP_GTUrrPt_ie_nv_V4: + case Hexagon::JMP_GTUrrPnt_ie_nv_V4: + case Hexagon::JMP_GTUrrNotPt_ie_nv_V4: + case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4: + + // JMP_GTUrrdn + case Hexagon::JMP_GTUrrdnPt_nv_V4: + case Hexagon::JMP_GTUrrdnPnt_nv_V4: + case Hexagon::JMP_GTUrrdnNotPt_nv_V4: + case Hexagon::JMP_GTUrrdnNotPnt_nv_V4: + case Hexagon::JMP_GTUrrdnPt_ie_nv_V4: + case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4: + case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4: + case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4: + return true; + } +} + +unsigned HexagonInstrInfo::getImmExtForm(const MachineInstr* MI) const { + switch(MI->getOpcode()) { + default: llvm_unreachable("Unknown type of instruction."); + // JMP_EQri + case Hexagon::JMP_EQriPt_nv_V4: + return Hexagon::JMP_EQriPt_ie_nv_V4; + case Hexagon::JMP_EQriNotPt_nv_V4: + return Hexagon::JMP_EQriNotPt_ie_nv_V4; + case Hexagon::JMP_EQriPnt_nv_V4: + return Hexagon::JMP_EQriPnt_ie_nv_V4; + case Hexagon::JMP_EQriNotPnt_nv_V4: + return Hexagon::JMP_EQriNotPnt_ie_nv_V4; + + // JMP_EQri -- with -1 + case Hexagon::JMP_EQriPtneg_nv_V4: + return Hexagon::JMP_EQriPtneg_ie_nv_V4; + case Hexagon::JMP_EQriNotPtneg_nv_V4: + return Hexagon::JMP_EQriNotPtneg_ie_nv_V4; + case Hexagon::JMP_EQriPntneg_nv_V4: + return Hexagon::JMP_EQriPntneg_ie_nv_V4; + case Hexagon::JMP_EQriNotPntneg_nv_V4: + return Hexagon::JMP_EQriNotPntneg_ie_nv_V4; + + // JMP_EQrr + case Hexagon::JMP_EQrrPt_nv_V4: + return Hexagon::JMP_EQrrPt_ie_nv_V4; + case Hexagon::JMP_EQrrNotPt_nv_V4: + return Hexagon::JMP_EQrrNotPt_ie_nv_V4; + case Hexagon::JMP_EQrrPnt_nv_V4: + return Hexagon::JMP_EQrrPnt_ie_nv_V4; + case Hexagon::JMP_EQrrNotPnt_nv_V4: + return Hexagon::JMP_EQrrNotPnt_ie_nv_V4; + + // JMP_GTri + case Hexagon::JMP_GTriPt_nv_V4: + return Hexagon::JMP_GTriPt_ie_nv_V4; + case Hexagon::JMP_GTriNotPt_nv_V4: + return Hexagon::JMP_GTriNotPt_ie_nv_V4; + case Hexagon::JMP_GTriPnt_nv_V4: + return 
Hexagon::JMP_GTriPnt_ie_nv_V4; + case Hexagon::JMP_GTriNotPnt_nv_V4: + return Hexagon::JMP_GTriNotPnt_ie_nv_V4; + + // JMP_GTri -- with -1 + case Hexagon::JMP_GTriPtneg_nv_V4: + return Hexagon::JMP_GTriPtneg_ie_nv_V4; + case Hexagon::JMP_GTriNotPtneg_nv_V4: + return Hexagon::JMP_GTriNotPtneg_ie_nv_V4; + case Hexagon::JMP_GTriPntneg_nv_V4: + return Hexagon::JMP_GTriPntneg_ie_nv_V4; + case Hexagon::JMP_GTriNotPntneg_nv_V4: + return Hexagon::JMP_GTriNotPntneg_ie_nv_V4; + + // JMP_GTrr + case Hexagon::JMP_GTrrPt_nv_V4: + return Hexagon::JMP_GTrrPt_ie_nv_V4; + case Hexagon::JMP_GTrrNotPt_nv_V4: + return Hexagon::JMP_GTrrNotPt_ie_nv_V4; + case Hexagon::JMP_GTrrPnt_nv_V4: + return Hexagon::JMP_GTrrPnt_ie_nv_V4; + case Hexagon::JMP_GTrrNotPnt_nv_V4: + return Hexagon::JMP_GTrrNotPnt_ie_nv_V4; + + // JMP_GTrrdn + case Hexagon::JMP_GTrrdnPt_nv_V4: + return Hexagon::JMP_GTrrdnPt_ie_nv_V4; + case Hexagon::JMP_GTrrdnNotPt_nv_V4: + return Hexagon::JMP_GTrrdnNotPt_ie_nv_V4; + case Hexagon::JMP_GTrrdnPnt_nv_V4: + return Hexagon::JMP_GTrrdnPnt_ie_nv_V4; + case Hexagon::JMP_GTrrdnNotPnt_nv_V4: + return Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4; + + // JMP_GTUri + case Hexagon::JMP_GTUriPt_nv_V4: + return Hexagon::JMP_GTUriPt_ie_nv_V4; + case Hexagon::JMP_GTUriNotPt_nv_V4: + return Hexagon::JMP_GTUriNotPt_ie_nv_V4; + case Hexagon::JMP_GTUriPnt_nv_V4: + return Hexagon::JMP_GTUriPnt_ie_nv_V4; + case Hexagon::JMP_GTUriNotPnt_nv_V4: + return Hexagon::JMP_GTUriNotPnt_ie_nv_V4; + + // JMP_GTUrr + case Hexagon::JMP_GTUrrPt_nv_V4: + return Hexagon::JMP_GTUrrPt_ie_nv_V4; + case Hexagon::JMP_GTUrrNotPt_nv_V4: + return Hexagon::JMP_GTUrrNotPt_ie_nv_V4; + case Hexagon::JMP_GTUrrPnt_nv_V4: + return Hexagon::JMP_GTUrrPnt_ie_nv_V4; + case Hexagon::JMP_GTUrrNotPnt_nv_V4: + return Hexagon::JMP_GTUrrNotPnt_ie_nv_V4; + + // JMP_GTUrrdn + case Hexagon::JMP_GTUrrdnPt_nv_V4: + return Hexagon::JMP_GTUrrdnPt_ie_nv_V4; + case Hexagon::JMP_GTUrrdnNotPt_nv_V4: + return Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4; + case 
Hexagon::JMP_GTUrrdnPnt_nv_V4: + return Hexagon::JMP_GTUrrdnPnt_ie_nv_V4; + case Hexagon::JMP_GTUrrdnNotPnt_nv_V4: + return Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4; + + case Hexagon::TFR_FI: + return Hexagon::TFR_FI_immext_V4; + + case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 : + case Hexagon::MEMw_ADDi_indexed_MEM_V4 : + case Hexagon::MEMw_SUBi_indexed_MEM_V4 : + case Hexagon::MEMw_ADDr_indexed_MEM_V4 : + case Hexagon::MEMw_SUBr_indexed_MEM_V4 : + case Hexagon::MEMw_ANDr_indexed_MEM_V4 : + case Hexagon::MEMw_ORr_indexed_MEM_V4 : + case Hexagon::MEMw_ADDSUBi_MEM_V4 : + case Hexagon::MEMw_ADDi_MEM_V4 : + case Hexagon::MEMw_SUBi_MEM_V4 : + case Hexagon::MEMw_ADDr_MEM_V4 : + case Hexagon::MEMw_SUBr_MEM_V4 : + case Hexagon::MEMw_ANDr_MEM_V4 : + case Hexagon::MEMw_ORr_MEM_V4 : + case Hexagon::MEMh_ADDSUBi_indexed_MEM_V4 : + case Hexagon::MEMh_ADDi_indexed_MEM_V4 : + case Hexagon::MEMh_SUBi_indexed_MEM_V4 : + case Hexagon::MEMh_ADDr_indexed_MEM_V4 : + case Hexagon::MEMh_SUBr_indexed_MEM_V4 : + case Hexagon::MEMh_ANDr_indexed_MEM_V4 : + case Hexagon::MEMh_ORr_indexed_MEM_V4 : + case Hexagon::MEMh_ADDSUBi_MEM_V4 : + case Hexagon::MEMh_ADDi_MEM_V4 : + case Hexagon::MEMh_SUBi_MEM_V4 : + case Hexagon::MEMh_ADDr_MEM_V4 : + case Hexagon::MEMh_SUBr_MEM_V4 : + case Hexagon::MEMh_ANDr_MEM_V4 : + case Hexagon::MEMh_ORr_MEM_V4 : + case Hexagon::MEMb_ADDSUBi_indexed_MEM_V4 : + case Hexagon::MEMb_ADDi_indexed_MEM_V4 : + case Hexagon::MEMb_SUBi_indexed_MEM_V4 : + case Hexagon::MEMb_ADDr_indexed_MEM_V4 : + case Hexagon::MEMb_SUBr_indexed_MEM_V4 : + case Hexagon::MEMb_ANDr_indexed_MEM_V4 : + case Hexagon::MEMb_ORr_indexed_MEM_V4 : + case Hexagon::MEMb_ADDSUBi_MEM_V4 : + case Hexagon::MEMb_ADDi_MEM_V4 : + case Hexagon::MEMb_SUBi_MEM_V4 : + case Hexagon::MEMb_ADDr_MEM_V4 : + case Hexagon::MEMb_SUBr_MEM_V4 : + case Hexagon::MEMb_ANDr_MEM_V4 : + case Hexagon::MEMb_ORr_MEM_V4 : + llvm_unreachable("Needs implementing."); + } +} + +unsigned HexagonInstrInfo::getNormalBranchForm(const MachineInstr* MI) 
const { + switch(MI->getOpcode()) { + default: llvm_unreachable("Unknown type of jump instruction."); + // JMP_EQri + case Hexagon::JMP_EQriPt_ie_nv_V4: + return Hexagon::JMP_EQriPt_nv_V4; + case Hexagon::JMP_EQriNotPt_ie_nv_V4: + return Hexagon::JMP_EQriNotPt_nv_V4; + case Hexagon::JMP_EQriPnt_ie_nv_V4: + return Hexagon::JMP_EQriPnt_nv_V4; + case Hexagon::JMP_EQriNotPnt_ie_nv_V4: + return Hexagon::JMP_EQriNotPnt_nv_V4; + + // JMP_EQri -- with -1 + case Hexagon::JMP_EQriPtneg_ie_nv_V4: + return Hexagon::JMP_EQriPtneg_nv_V4; + case Hexagon::JMP_EQriNotPtneg_ie_nv_V4: + return Hexagon::JMP_EQriNotPtneg_nv_V4; + case Hexagon::JMP_EQriPntneg_ie_nv_V4: + return Hexagon::JMP_EQriPntneg_nv_V4; + case Hexagon::JMP_EQriNotPntneg_ie_nv_V4: + return Hexagon::JMP_EQriNotPntneg_nv_V4; + + // JMP_EQrr + case Hexagon::JMP_EQrrPt_ie_nv_V4: + return Hexagon::JMP_EQrrPt_nv_V4; + case Hexagon::JMP_EQrrNotPt_ie_nv_V4: + return Hexagon::JMP_EQrrNotPt_nv_V4; + case Hexagon::JMP_EQrrPnt_ie_nv_V4: + return Hexagon::JMP_EQrrPnt_nv_V4; + case Hexagon::JMP_EQrrNotPnt_ie_nv_V4: + return Hexagon::JMP_EQrrNotPnt_nv_V4; + + // JMP_GTri + case Hexagon::JMP_GTriPt_ie_nv_V4: + return Hexagon::JMP_GTriPt_nv_V4; + case Hexagon::JMP_GTriNotPt_ie_nv_V4: + return Hexagon::JMP_GTriNotPt_nv_V4; + case Hexagon::JMP_GTriPnt_ie_nv_V4: + return Hexagon::JMP_GTriPnt_nv_V4; + case Hexagon::JMP_GTriNotPnt_ie_nv_V4: + return Hexagon::JMP_GTriNotPnt_nv_V4; + + // JMP_GTri -- with -1 + case Hexagon::JMP_GTriPtneg_ie_nv_V4: + return Hexagon::JMP_GTriPtneg_nv_V4; + case Hexagon::JMP_GTriNotPtneg_ie_nv_V4: + return Hexagon::JMP_GTriNotPtneg_nv_V4; + case Hexagon::JMP_GTriPntneg_ie_nv_V4: + return Hexagon::JMP_GTriPntneg_nv_V4; + case Hexagon::JMP_GTriNotPntneg_ie_nv_V4: + return Hexagon::JMP_GTriNotPntneg_nv_V4; + + // JMP_GTrr + case Hexagon::JMP_GTrrPt_ie_nv_V4: + return Hexagon::JMP_GTrrPt_nv_V4; + case Hexagon::JMP_GTrrNotPt_ie_nv_V4: + return Hexagon::JMP_GTrrNotPt_nv_V4; + case Hexagon::JMP_GTrrPnt_ie_nv_V4: + 
return Hexagon::JMP_GTrrPnt_nv_V4; + case Hexagon::JMP_GTrrNotPnt_ie_nv_V4: + return Hexagon::JMP_GTrrNotPnt_nv_V4; + + // JMP_GTrrdn + case Hexagon::JMP_GTrrdnPt_ie_nv_V4: + return Hexagon::JMP_GTrrdnPt_nv_V4; + case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4: + return Hexagon::JMP_GTrrdnNotPt_nv_V4; + case Hexagon::JMP_GTrrdnPnt_ie_nv_V4: + return Hexagon::JMP_GTrrdnPnt_nv_V4; + case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4: + return Hexagon::JMP_GTrrdnNotPnt_nv_V4; + + // JMP_GTUri + case Hexagon::JMP_GTUriPt_ie_nv_V4: + return Hexagon::JMP_GTUriPt_nv_V4; + case Hexagon::JMP_GTUriNotPt_ie_nv_V4: + return Hexagon::JMP_GTUriNotPt_nv_V4; + case Hexagon::JMP_GTUriPnt_ie_nv_V4: + return Hexagon::JMP_GTUriPnt_nv_V4; + case Hexagon::JMP_GTUriNotPnt_ie_nv_V4: + return Hexagon::JMP_GTUriNotPnt_nv_V4; + + // JMP_GTUrr + case Hexagon::JMP_GTUrrPt_ie_nv_V4: + return Hexagon::JMP_GTUrrPt_nv_V4; + case Hexagon::JMP_GTUrrNotPt_ie_nv_V4: + return Hexagon::JMP_GTUrrNotPt_nv_V4; + case Hexagon::JMP_GTUrrPnt_ie_nv_V4: + return Hexagon::JMP_GTUrrPnt_nv_V4; + case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4: + return Hexagon::JMP_GTUrrNotPnt_nv_V4; + + // JMP_GTUrrdn + case Hexagon::JMP_GTUrrdnPt_ie_nv_V4: + return Hexagon::JMP_GTUrrdnPt_nv_V4; + case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4: + return Hexagon::JMP_GTUrrdnNotPt_nv_V4; + case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4: + return Hexagon::JMP_GTUrrdnPnt_nv_V4; + case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4: + return Hexagon::JMP_GTUrrdnNotPnt_nv_V4; + } +} + + +bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const { + switch (MI->getOpcode()) { + default: return false; + // Store Byte + case Hexagon::STrib_nv_V4: + case Hexagon::STrib_indexed_nv_V4: + case Hexagon::STrib_indexed_shl_nv_V4: + case Hexagon::STrib_shl_nv_V4: + case Hexagon::STrib_GP_nv_V4: + case Hexagon::STb_GP_nv_V4: + case Hexagon::POST_STbri_nv_V4: + case Hexagon::STrib_cPt_nv_V4: + case Hexagon::STrib_cdnPt_nv_V4: + case Hexagon::STrib_cNotPt_nv_V4: + case 
Hexagon::STrib_cdnNotPt_nv_V4: + case Hexagon::STrib_indexed_cPt_nv_V4: + case Hexagon::STrib_indexed_cdnPt_nv_V4: + case Hexagon::STrib_indexed_cNotPt_nv_V4: + case Hexagon::STrib_indexed_cdnNotPt_nv_V4: + case Hexagon::STrib_indexed_shl_cPt_nv_V4: + case Hexagon::STrib_indexed_shl_cdnPt_nv_V4: + case Hexagon::STrib_indexed_shl_cNotPt_nv_V4: + case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4: + case Hexagon::POST_STbri_cPt_nv_V4: + case Hexagon::POST_STbri_cdnPt_nv_V4: + case Hexagon::POST_STbri_cNotPt_nv_V4: + case Hexagon::POST_STbri_cdnNotPt_nv_V4: + case Hexagon::STb_GP_cPt_nv_V4: + case Hexagon::STb_GP_cNotPt_nv_V4: + case Hexagon::STb_GP_cdnPt_nv_V4: + case Hexagon::STb_GP_cdnNotPt_nv_V4: + case Hexagon::STrib_GP_cPt_nv_V4: + case Hexagon::STrib_GP_cNotPt_nv_V4: + case Hexagon::STrib_GP_cdnPt_nv_V4: + case Hexagon::STrib_GP_cdnNotPt_nv_V4: + case Hexagon::STrib_abs_nv_V4: + case Hexagon::STrib_abs_cPt_nv_V4: + case Hexagon::STrib_abs_cdnPt_nv_V4: + case Hexagon::STrib_abs_cNotPt_nv_V4: + case Hexagon::STrib_abs_cdnNotPt_nv_V4: + case Hexagon::STrib_imm_abs_nv_V4: + case Hexagon::STrib_imm_abs_cPt_nv_V4: + case Hexagon::STrib_imm_abs_cdnPt_nv_V4: + case Hexagon::STrib_imm_abs_cNotPt_nv_V4: + case Hexagon::STrib_imm_abs_cdnNotPt_nv_V4: + + // Store Halfword + case Hexagon::STrih_nv_V4: + case Hexagon::STrih_indexed_nv_V4: + case Hexagon::STrih_indexed_shl_nv_V4: + case Hexagon::STrih_shl_nv_V4: + case Hexagon::STrih_GP_nv_V4: + case Hexagon::STh_GP_nv_V4: + case Hexagon::POST_SThri_nv_V4: + case Hexagon::STrih_cPt_nv_V4: + case Hexagon::STrih_cdnPt_nv_V4: + case Hexagon::STrih_cNotPt_nv_V4: + case Hexagon::STrih_cdnNotPt_nv_V4: + case Hexagon::STrih_indexed_cPt_nv_V4: + case Hexagon::STrih_indexed_cdnPt_nv_V4: + case Hexagon::STrih_indexed_cNotPt_nv_V4: + case Hexagon::STrih_indexed_cdnNotPt_nv_V4: + case Hexagon::STrih_indexed_shl_cPt_nv_V4: + case Hexagon::STrih_indexed_shl_cdnPt_nv_V4: + case Hexagon::STrih_indexed_shl_cNotPt_nv_V4: + case 
Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4: + case Hexagon::POST_SThri_cPt_nv_V4: + case Hexagon::POST_SThri_cdnPt_nv_V4: + case Hexagon::POST_SThri_cNotPt_nv_V4: + case Hexagon::POST_SThri_cdnNotPt_nv_V4: + case Hexagon::STh_GP_cPt_nv_V4: + case Hexagon::STh_GP_cNotPt_nv_V4: + case Hexagon::STh_GP_cdnPt_nv_V4: + case Hexagon::STh_GP_cdnNotPt_nv_V4: + case Hexagon::STrih_GP_cPt_nv_V4: + case Hexagon::STrih_GP_cNotPt_nv_V4: + case Hexagon::STrih_GP_cdnPt_nv_V4: + case Hexagon::STrih_GP_cdnNotPt_nv_V4: + case Hexagon::STrih_abs_nv_V4: + case Hexagon::STrih_abs_cPt_nv_V4: + case Hexagon::STrih_abs_cdnPt_nv_V4: + case Hexagon::STrih_abs_cNotPt_nv_V4: + case Hexagon::STrih_abs_cdnNotPt_nv_V4: + case Hexagon::STrih_imm_abs_nv_V4: + case Hexagon::STrih_imm_abs_cPt_nv_V4: + case Hexagon::STrih_imm_abs_cdnPt_nv_V4: + case Hexagon::STrih_imm_abs_cNotPt_nv_V4: + case Hexagon::STrih_imm_abs_cdnNotPt_nv_V4: + + // Store Word + case Hexagon::STriw_nv_V4: + case Hexagon::STriw_indexed_nv_V4: + case Hexagon::STriw_indexed_shl_nv_V4: + case Hexagon::STriw_shl_nv_V4: + case Hexagon::STriw_GP_nv_V4: + case Hexagon::STw_GP_nv_V4: + case Hexagon::POST_STwri_nv_V4: + case Hexagon::STriw_cPt_nv_V4: + case Hexagon::STriw_cdnPt_nv_V4: + case Hexagon::STriw_cNotPt_nv_V4: + case Hexagon::STriw_cdnNotPt_nv_V4: + case Hexagon::STriw_indexed_cPt_nv_V4: + case Hexagon::STriw_indexed_cdnPt_nv_V4: + case Hexagon::STriw_indexed_cNotPt_nv_V4: + case Hexagon::STriw_indexed_cdnNotPt_nv_V4: + case Hexagon::STriw_indexed_shl_cPt_nv_V4: + case Hexagon::STriw_indexed_shl_cdnPt_nv_V4: + case Hexagon::STriw_indexed_shl_cNotPt_nv_V4: + case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4: + case Hexagon::POST_STwri_cPt_nv_V4: + case Hexagon::POST_STwri_cdnPt_nv_V4: + case Hexagon::POST_STwri_cNotPt_nv_V4: + case Hexagon::POST_STwri_cdnNotPt_nv_V4: + case Hexagon::STw_GP_cPt_nv_V4: + case Hexagon::STw_GP_cNotPt_nv_V4: + case Hexagon::STw_GP_cdnPt_nv_V4: + case Hexagon::STw_GP_cdnNotPt_nv_V4: + case 
Hexagon::STriw_GP_cPt_nv_V4: + case Hexagon::STriw_GP_cNotPt_nv_V4: + case Hexagon::STriw_GP_cdnPt_nv_V4: + case Hexagon::STriw_GP_cdnNotPt_nv_V4: + case Hexagon::STriw_abs_nv_V4: + case Hexagon::STriw_abs_cPt_nv_V4: + case Hexagon::STriw_abs_cdnPt_nv_V4: + case Hexagon::STriw_abs_cNotPt_nv_V4: + case Hexagon::STriw_abs_cdnNotPt_nv_V4: + case Hexagon::STriw_imm_abs_nv_V4: + case Hexagon::STriw_imm_abs_cPt_nv_V4: + case Hexagon::STriw_imm_abs_cdnPt_nv_V4: + case Hexagon::STriw_imm_abs_cNotPt_nv_V4: + case Hexagon::STriw_imm_abs_cdnNotPt_nv_V4: + return true; + } +} + +bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const { + switch (MI->getOpcode()) + { + default: return false; + // Load Byte + case Hexagon::POST_LDrib: + case Hexagon::POST_LDrib_cPt: + case Hexagon::POST_LDrib_cNotPt: + case Hexagon::POST_LDrib_cdnPt_V4: + case Hexagon::POST_LDrib_cdnNotPt_V4: + + // Load unsigned byte + case Hexagon::POST_LDriub: + case Hexagon::POST_LDriub_cPt: + case Hexagon::POST_LDriub_cNotPt: + case Hexagon::POST_LDriub_cdnPt_V4: + case Hexagon::POST_LDriub_cdnNotPt_V4: + + // Load halfword + case Hexagon::POST_LDrih: + case Hexagon::POST_LDrih_cPt: + case Hexagon::POST_LDrih_cNotPt: + case Hexagon::POST_LDrih_cdnPt_V4: + case Hexagon::POST_LDrih_cdnNotPt_V4: + + // Load unsigned halfword + case Hexagon::POST_LDriuh: + case Hexagon::POST_LDriuh_cPt: + case Hexagon::POST_LDriuh_cNotPt: + case Hexagon::POST_LDriuh_cdnPt_V4: + case Hexagon::POST_LDriuh_cdnNotPt_V4: + + // Load word + case Hexagon::POST_LDriw: + case Hexagon::POST_LDriw_cPt: + case Hexagon::POST_LDriw_cNotPt: + case Hexagon::POST_LDriw_cdnPt_V4: + case Hexagon::POST_LDriw_cdnNotPt_V4: + + // Load double word + case Hexagon::POST_LDrid: + case Hexagon::POST_LDrid_cPt: + case Hexagon::POST_LDrid_cNotPt: + case Hexagon::POST_LDrid_cdnPt_V4: + case Hexagon::POST_LDrid_cdnNotPt_V4: + + // Store byte + case Hexagon::POST_STbri: + case Hexagon::POST_STbri_cPt: + case Hexagon::POST_STbri_cNotPt: + case 
Hexagon::POST_STbri_cdnPt_V4: + case Hexagon::POST_STbri_cdnNotPt_V4: + + // Store halfword + case Hexagon::POST_SThri: + case Hexagon::POST_SThri_cPt: + case Hexagon::POST_SThri_cNotPt: + case Hexagon::POST_SThri_cdnPt_V4: + case Hexagon::POST_SThri_cdnNotPt_V4: + + // Store word + case Hexagon::POST_STwri: + case Hexagon::POST_STwri_cPt: + case Hexagon::POST_STwri_cNotPt: + case Hexagon::POST_STwri_cdnPt_V4: + case Hexagon::POST_STwri_cdnNotPt_V4: + + // Store double word + case Hexagon::POST_STdri: + case Hexagon::POST_STdri_cPt: + case Hexagon::POST_STdri_cNotPt: + case Hexagon::POST_STdri_cdnPt_V4: + case Hexagon::POST_STdri_cdnNotPt_V4: + return true; + } +} + +bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const { + return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4; +} bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const { bool isPred = MI->getDesc().isPredicable(); @@ -548,7 +1388,7 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const { case Hexagon::SXTH: case Hexagon::ZXTB: case Hexagon::ZXTH: - return Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4; + return Subtarget.hasV4TOps(); case Hexagon::JMPR: return false; @@ -557,8 +1397,27 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const { return true; } +// This function performs the following inversiones: +// +// cPt ---> cNotPt +// cNotPt ---> cPt +// +// however, these inversiones are NOT included: +// +// cdnPt -X-> cdnNotPt +// cdnNotPt -X-> cdnPt +// cPt_nv -X-> cNotPt_nv (new value stores) +// cNotPt_nv -X-> cPt_nv (new value stores) +// +// because only the following transformations are allowed: +// +// cNotPt ---> cdnNotPt +// cPt ---> cdnPt +// cNotPt ---> cNotPt_nv +// cPt ---> cPt_nv unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const { switch(Opc) { + default: llvm_unreachable("Unexpected predicated instruction"); case Hexagon::TFR_cPt: return Hexagon::TFR_cNotPt; case Hexagon::TFR_cNotPt: @@ -805,6 
+1664,47 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const { case Hexagon::STrid_indexed_shl_cNotPt_V4: return Hexagon::STrid_indexed_shl_cPt_V4; + // V4 Store to global address. + case Hexagon::STd_GP_cPt_V4: + return Hexagon::STd_GP_cNotPt_V4; + case Hexagon::STd_GP_cNotPt_V4: + return Hexagon::STd_GP_cPt_V4; + + case Hexagon::STb_GP_cPt_V4: + return Hexagon::STb_GP_cNotPt_V4; + case Hexagon::STb_GP_cNotPt_V4: + return Hexagon::STb_GP_cPt_V4; + + case Hexagon::STh_GP_cPt_V4: + return Hexagon::STh_GP_cNotPt_V4; + case Hexagon::STh_GP_cNotPt_V4: + return Hexagon::STh_GP_cPt_V4; + + case Hexagon::STw_GP_cPt_V4: + return Hexagon::STw_GP_cNotPt_V4; + case Hexagon::STw_GP_cNotPt_V4: + return Hexagon::STw_GP_cPt_V4; + + case Hexagon::STrid_GP_cPt_V4: + return Hexagon::STrid_GP_cNotPt_V4; + case Hexagon::STrid_GP_cNotPt_V4: + return Hexagon::STrid_GP_cPt_V4; + + case Hexagon::STrib_GP_cPt_V4: + return Hexagon::STrib_GP_cNotPt_V4; + case Hexagon::STrib_GP_cNotPt_V4: + return Hexagon::STrib_GP_cPt_V4; + + case Hexagon::STrih_GP_cPt_V4: + return Hexagon::STrih_GP_cNotPt_V4; + case Hexagon::STrih_GP_cNotPt_V4: + return Hexagon::STrih_GP_cPt_V4; + + case Hexagon::STriw_GP_cPt_V4: + return Hexagon::STriw_GP_cNotPt_V4; + case Hexagon::STriw_GP_cNotPt_V4: + return Hexagon::STriw_GP_cPt_V4; + // Load. case Hexagon::LDrid_cPt: return Hexagon::LDrid_cNotPt; @@ -1009,9 +1909,6 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const { return Hexagon::JMP_GTUrrdnNotPnt_nv_V4; case Hexagon::JMP_GTUrrdnNotPnt_nv_V4: return Hexagon::JMP_GTUrrdnPnt_nv_V4; - - default: - llvm_unreachable("Unexpected predicated instruction"); } } @@ -1022,12 +1919,21 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { case Hexagon::TFR: return !invertPredicate ? Hexagon::TFR_cPt : Hexagon::TFR_cNotPt; + case Hexagon::TFRI_f: + return !invertPredicate ? 
Hexagon::TFRI_cPt_f : + Hexagon::TFRI_cNotPt_f; case Hexagon::TFRI: return !invertPredicate ? Hexagon::TFRI_cPt : Hexagon::TFRI_cNotPt; case Hexagon::JMP: return !invertPredicate ? Hexagon::JMP_c : Hexagon::JMP_cNot; + case Hexagon::JMP_EQrrPt_nv_V4: + return !invertPredicate ? Hexagon::JMP_EQrrPt_nv_V4 : + Hexagon::JMP_EQrrNotPt_nv_V4; + case Hexagon::JMP_EQriPt_nv_V4: + return !invertPredicate ? Hexagon::JMP_EQriPt_nv_V4 : + Hexagon::JMP_EQriNotPt_nv_V4; case Hexagon::ADD_ri: return !invertPredicate ? Hexagon::ADD_ri_cPt : Hexagon::ADD_ri_cNotPt; @@ -1121,6 +2027,46 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { case Hexagon::LDriw_indexed_shl_V4: return !invertPredicate ? Hexagon::LDriw_indexed_shl_cPt_V4 : Hexagon::LDriw_indexed_shl_cNotPt_V4; + + // V4 Load from global address + case Hexagon::LDrid_GP_V4: + return !invertPredicate ? Hexagon::LDrid_GP_cPt_V4 : + Hexagon::LDrid_GP_cNotPt_V4; + case Hexagon::LDrib_GP_V4: + return !invertPredicate ? Hexagon::LDrib_GP_cPt_V4 : + Hexagon::LDrib_GP_cNotPt_V4; + case Hexagon::LDriub_GP_V4: + return !invertPredicate ? Hexagon::LDriub_GP_cPt_V4 : + Hexagon::LDriub_GP_cNotPt_V4; + case Hexagon::LDrih_GP_V4: + return !invertPredicate ? Hexagon::LDrih_GP_cPt_V4 : + Hexagon::LDrih_GP_cNotPt_V4; + case Hexagon::LDriuh_GP_V4: + return !invertPredicate ? Hexagon::LDriuh_GP_cPt_V4 : + Hexagon::LDriuh_GP_cNotPt_V4; + case Hexagon::LDriw_GP_V4: + return !invertPredicate ? Hexagon::LDriw_GP_cPt_V4 : + Hexagon::LDriw_GP_cNotPt_V4; + + case Hexagon::LDd_GP_V4: + return !invertPredicate ? Hexagon::LDd_GP_cPt_V4 : + Hexagon::LDd_GP_cNotPt_V4; + case Hexagon::LDb_GP_V4: + return !invertPredicate ? Hexagon::LDb_GP_cPt_V4 : + Hexagon::LDb_GP_cNotPt_V4; + case Hexagon::LDub_GP_V4: + return !invertPredicate ? Hexagon::LDub_GP_cPt_V4 : + Hexagon::LDub_GP_cNotPt_V4; + case Hexagon::LDh_GP_V4: + return !invertPredicate ? 
Hexagon::LDh_GP_cPt_V4 : + Hexagon::LDh_GP_cNotPt_V4; + case Hexagon::LDuh_GP_V4: + return !invertPredicate ? Hexagon::LDuh_GP_cPt_V4 : + Hexagon::LDuh_GP_cNotPt_V4; + case Hexagon::LDw_GP_V4: + return !invertPredicate ? Hexagon::LDw_GP_cPt_V4 : + Hexagon::LDw_GP_cNotPt_V4; + // Byte. case Hexagon::POST_STbri: return !invertPredicate ? Hexagon::POST_STbri_cPt : @@ -1182,6 +2128,34 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { case Hexagon::STrid_indexed_shl_V4: return !invertPredicate ? Hexagon::STrid_indexed_shl_cPt_V4 : Hexagon::STrid_indexed_shl_cNotPt_V4; + + // V4 Store to global address + case Hexagon::STrid_GP_V4: + return !invertPredicate ? Hexagon::STrid_GP_cPt_V4 : + Hexagon::STrid_GP_cNotPt_V4; + case Hexagon::STrib_GP_V4: + return !invertPredicate ? Hexagon::STrib_GP_cPt_V4 : + Hexagon::STrib_GP_cNotPt_V4; + case Hexagon::STrih_GP_V4: + return !invertPredicate ? Hexagon::STrih_GP_cPt_V4 : + Hexagon::STrih_GP_cNotPt_V4; + case Hexagon::STriw_GP_V4: + return !invertPredicate ? Hexagon::STriw_GP_cPt_V4 : + Hexagon::STriw_GP_cNotPt_V4; + + case Hexagon::STd_GP_V4: + return !invertPredicate ? Hexagon::STd_GP_cPt_V4 : + Hexagon::STd_GP_cNotPt_V4; + case Hexagon::STb_GP_V4: + return !invertPredicate ? Hexagon::STb_GP_cPt_V4 : + Hexagon::STb_GP_cNotPt_V4; + case Hexagon::STh_GP_V4: + return !invertPredicate ? Hexagon::STh_GP_cPt_V4 : + Hexagon::STh_GP_cNotPt_V4; + case Hexagon::STw_GP_V4: + return !invertPredicate ? Hexagon::STw_GP_cPt_V4 : + Hexagon::STw_GP_cNotPt_V4; + // Load. case Hexagon::LDrid: return !invertPredicate ? Hexagon::LDrid_cPt : @@ -1201,9 +2175,6 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { case Hexagon::LDriub: return !invertPredicate ? Hexagon::LDriub_cPt : Hexagon::LDriub_cNotPt; - case Hexagon::LDriubit: - return !invertPredicate ? Hexagon::LDriub_cPt : - Hexagon::LDriub_cNotPt; // Load Indexed. case Hexagon::LDrid_indexed: return !invertPredicate ? 
Hexagon::LDrid_indexed_cPt : @@ -1297,7 +2268,7 @@ PredicateInstruction(MachineInstr *MI, bool HexagonInstrInfo:: isProfitableToIfCvt(MachineBasicBlock &MBB, - unsigned NumCyles, + unsigned NumCycles, unsigned ExtraPredCycles, const BranchProbability &Probability) const { return true; @@ -1323,7 +2294,6 @@ bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const { return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask); } - bool HexagonInstrInfo::DefinesPredicate(MachineInstr *MI, std::vector<MachineOperand> &Pred) const { @@ -1331,7 +2301,7 @@ HexagonInstrInfo::DefinesPredicate(MachineInstr *MI, MachineOperand MO = MI->getOperand(oper); if (MO.isReg() && MO.isDef()) { const TargetRegisterClass* RC = RI.getMinimalPhysRegClass(MO.getReg()); - if (RC == Hexagon::PredRegsRegisterClass) { + if (RC == &Hexagon::PredRegsRegClass) { Pred.push_back(MO); return true; } @@ -1373,6 +2343,7 @@ isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs, bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const { switch (MI->getOpcode()) { + default: return false; case Hexagon::DEALLOC_RET_V4 : case Hexagon::DEALLOC_RET_cPt_V4 : case Hexagon::DEALLOC_RET_cNotPt_V4 : @@ -1382,7 +2353,6 @@ bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const { case Hexagon::DEALLOC_RET_cNotdnPt_V4 : return true; } - return false; } @@ -1396,13 +2366,17 @@ isValidOffset(const int Opcode, const int Offset) const { switch(Opcode) { case Hexagon::LDriw: + case Hexagon::LDriw_f: case Hexagon::STriw: + case Hexagon::STriw_f: assert((Offset % 4 == 0) && "Offset has incorrect alignment"); return (Offset >= Hexagon_MEMW_OFFSET_MIN) && (Offset <= Hexagon_MEMW_OFFSET_MAX); case Hexagon::LDrid: + case Hexagon::LDrid_f: case Hexagon::STrid: + case Hexagon::STrid_f: assert((Offset % 8 == 0) && "Offset has incorrect alignment"); return (Offset >= Hexagon_MEMD_OFFSET_MIN) && (Offset <= Hexagon_MEMD_OFFSET_MAX); @@ -1410,7 +2384,6 @@ isValidOffset(const int 
Opcode, const int Offset) const { case Hexagon::LDrih: case Hexagon::LDriuh: case Hexagon::STrih: - case Hexagon::LDrih_ae: assert((Offset % 2 == 0) && "Offset has incorrect alignment"); return (Offset >= Hexagon_MEMH_OFFSET_MIN) && (Offset <= Hexagon_MEMH_OFFSET_MAX); @@ -1418,9 +2391,6 @@ isValidOffset(const int Opcode, const int Offset) const { case Hexagon::LDrib: case Hexagon::STrib: case Hexagon::LDriub: - case Hexagon::LDriubit: - case Hexagon::LDrib_ae: - case Hexagon::LDriub_ae: return (Offset >= Hexagon_MEMB_OFFSET_MIN) && (Offset <= Hexagon_MEMB_OFFSET_MAX); @@ -1528,6 +2498,7 @@ bool HexagonInstrInfo:: isMemOp(const MachineInstr *MI) const { switch (MI->getOpcode()) { + default: return false; case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 : case Hexagon::MEMw_ADDi_indexed_MEM_V4 : case Hexagon::MEMw_SUBi_indexed_MEM_V4 : @@ -1570,28 +2541,59 @@ isMemOp(const MachineInstr *MI) const { case Hexagon::MEMb_SUBr_MEM_V4 : case Hexagon::MEMb_ANDr_MEM_V4 : case Hexagon::MEMb_ORr_MEM_V4 : - return true; + return true; } - return false; } bool HexagonInstrInfo:: isSpillPredRegOp(const MachineInstr *MI) const { - switch (MI->getOpcode()) - { + switch (MI->getOpcode()) { + default: return false; case Hexagon::STriw_pred : case Hexagon::LDriw_pred : - return true; + return true; + } +} + +bool HexagonInstrInfo::isNewValueJumpCandidate(const MachineInstr *MI) const { + switch (MI->getOpcode()) { + default: return false; + case Hexagon::CMPEQrr: + case Hexagon::CMPEQri: + case Hexagon::CMPLTrr: + case Hexagon::CMPGTrr: + case Hexagon::CMPGTri: + case Hexagon::CMPLTUrr: + case Hexagon::CMPGTUrr: + case Hexagon::CMPGTUri: + case Hexagon::CMPGEri: + case Hexagon::CMPGEUri: + return true; } - return false; } +bool HexagonInstrInfo:: +isConditionalTransfer (const MachineInstr *MI) const { + switch (MI->getOpcode()) { + default: return false; + case Hexagon::TFR_cPt: + case Hexagon::TFR_cNotPt: + case Hexagon::TFRI_cPt: + case Hexagon::TFRI_cNotPt: + case Hexagon::TFR_cdnPt: + 
case Hexagon::TFR_cdnNotPt: + case Hexagon::TFRI_cdnPt: + case Hexagon::TFRI_cdnNotPt: + return true; + } +} bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const { const HexagonRegisterInfo& QRI = getRegisterInfo(); switch (MI->getOpcode()) { + default: return false; case Hexagon::ADD_ri_cPt: case Hexagon::ADD_ri_cNotPt: case Hexagon::ADD_rr_cPt: @@ -1619,19 +2621,16 @@ bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const { case Hexagon::ZXTB_cNotPt_V4: case Hexagon::ZXTH_cPt_V4: case Hexagon::ZXTH_cNotPt_V4: - return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4; - - default: - return false; + return QRI.Subtarget.hasV4TOps(); } } - bool HexagonInstrInfo:: isConditionalLoad (const MachineInstr* MI) const { const HexagonRegisterInfo& QRI = getRegisterInfo(); switch (MI->getOpcode()) { + default: return false; case Hexagon::LDrid_cPt : case Hexagon::LDrid_cNotPt : case Hexagon::LDrid_indexed_cPt : @@ -1669,7 +2668,7 @@ isConditionalLoad (const MachineInstr* MI) const { case Hexagon::POST_LDriuh_cNotPt : case Hexagon::POST_LDriub_cPt : case Hexagon::POST_LDriub_cNotPt : - return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4; + return QRI.Subtarget.hasV4TOps(); case Hexagon::LDrid_indexed_cPt_V4 : case Hexagon::LDrid_indexed_cNotPt_V4 : case Hexagon::LDrid_indexed_shl_cPt_V4 : @@ -1694,12 +2693,136 @@ isConditionalLoad (const MachineInstr* MI) const { case Hexagon::LDriw_indexed_cNotPt_V4 : case Hexagon::LDriw_indexed_shl_cPt_V4 : case Hexagon::LDriw_indexed_shl_cNotPt_V4 : - return QRI.Subtarget.getHexagonArchVersion() == HexagonSubtarget::V4; - default: - return false; + return QRI.Subtarget.hasV4TOps(); } } +// Returns true if an instruction is a conditional store. +// +// Note: It doesn't include conditional new-value stores as they can't be +// converted to .new predicate. +// +// p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ] +// ^ ^ +// / \ (not OK. 
it will cause new-value store to be +// / X conditional on p0.new while R2 producer is +// / \ on p0) +// / \. +// p.new store p.old NV store +// [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new] +// ^ ^ +// \ / +// \ / +// \ / +// p.old store +// [if (p0)memw(R0+#0)=R2] +// +// The above diagram shows the steps involoved in the conversion of a predicated +// store instruction to its .new predicated new-value form. +// +// The following set of instructions further explains the scenario where +// conditional new-value store becomes invalid when promoted to .new predicate +// form. +// +// { 1) if (p0) r0 = add(r1, r2) +// 2) p0 = cmp.eq(r3, #0) } +// +// 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with +// the first two instructions because in instr 1, r0 is conditional on old value +// of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which +// is not valid for new-value stores. +bool HexagonInstrInfo:: +isConditionalStore (const MachineInstr* MI) const { + const HexagonRegisterInfo& QRI = getRegisterInfo(); + switch (MI->getOpcode()) + { + default: return false; + case Hexagon::STrib_imm_cPt_V4 : + case Hexagon::STrib_imm_cNotPt_V4 : + case Hexagon::STrib_indexed_shl_cPt_V4 : + case Hexagon::STrib_indexed_shl_cNotPt_V4 : + case Hexagon::STrib_cPt : + case Hexagon::STrib_cNotPt : + case Hexagon::POST_STbri_cPt : + case Hexagon::POST_STbri_cNotPt : + case Hexagon::STrid_indexed_cPt : + case Hexagon::STrid_indexed_cNotPt : + case Hexagon::STrid_indexed_shl_cPt_V4 : + case Hexagon::POST_STdri_cPt : + case Hexagon::POST_STdri_cNotPt : + case Hexagon::STrih_cPt : + case Hexagon::STrih_cNotPt : + case Hexagon::STrih_indexed_cPt : + case Hexagon::STrih_indexed_cNotPt : + case Hexagon::STrih_imm_cPt_V4 : + case Hexagon::STrih_imm_cNotPt_V4 : + case Hexagon::STrih_indexed_shl_cPt_V4 : + case Hexagon::STrih_indexed_shl_cNotPt_V4 : + case Hexagon::POST_SThri_cPt : + case Hexagon::POST_SThri_cNotPt : + case Hexagon::STriw_cPt : + 
case Hexagon::STriw_cNotPt : + case Hexagon::STriw_indexed_cPt : + case Hexagon::STriw_indexed_cNotPt : + case Hexagon::STriw_imm_cPt_V4 : + case Hexagon::STriw_imm_cNotPt_V4 : + case Hexagon::STriw_indexed_shl_cPt_V4 : + case Hexagon::STriw_indexed_shl_cNotPt_V4 : + case Hexagon::POST_STwri_cPt : + case Hexagon::POST_STwri_cNotPt : + return QRI.Subtarget.hasV4TOps(); + + // V4 global address store before promoting to dot new. + case Hexagon::STrid_GP_cPt_V4 : + case Hexagon::STrid_GP_cNotPt_V4 : + case Hexagon::STrib_GP_cPt_V4 : + case Hexagon::STrib_GP_cNotPt_V4 : + case Hexagon::STrih_GP_cPt_V4 : + case Hexagon::STrih_GP_cNotPt_V4 : + case Hexagon::STriw_GP_cPt_V4 : + case Hexagon::STriw_GP_cNotPt_V4 : + case Hexagon::STd_GP_cPt_V4 : + case Hexagon::STd_GP_cNotPt_V4 : + case Hexagon::STb_GP_cPt_V4 : + case Hexagon::STb_GP_cNotPt_V4 : + case Hexagon::STh_GP_cPt_V4 : + case Hexagon::STh_GP_cNotPt_V4 : + case Hexagon::STw_GP_cPt_V4 : + case Hexagon::STw_GP_cNotPt_V4 : + return QRI.Subtarget.hasV4TOps(); + + // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded + // from the "Conditional Store" list. Because a predicated new value store + // would NOT be promoted to a double dot new store. See diagram below: + // This function returns yes for those stores that are predicated but not + // yet promoted to predicate dot new instructions. 
+ // + // +---------------------+ + // /-----| if (p0) memw(..)=r0 |---------\~ + // || +---------------------+ || + // promote || /\ /\ || promote + // || /||\ /||\ || + // \||/ demote || \||/ + // \/ || || \/ + // +-------------------------+ || +-------------------------+ + // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new | + // +-------------------------+ || +-------------------------+ + // || || || + // || demote \||/ + // promote || \/ NOT possible + // || || /\~ + // \||/ || /||\~ + // \/ || || + // +-----------------------------+ + // | if (p0.new) memw(..)=r0.new | + // +-----------------------------+ + // Double Dot New Store + // + } +} + + + DFAPacketizer *HexagonInstrInfo:: CreateTargetScheduleState(const TargetMachine *TM, const ScheduleDAG *DAG) const { diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h index 7306870..2bb53f8 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.h +++ b/lib/Target/Hexagon/HexagonInstrInfo.h @@ -112,7 +112,7 @@ public: PredicateInstruction(MachineInstr *MI, const SmallVectorImpl<MachineOperand> &Cond) const; - virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCyles, + virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, const BranchProbability &Probability) const; @@ -160,10 +160,21 @@ public: bool isS8_Immediate(const int value) const; bool isS6_Immediate(const int value) const; + bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const; + bool isConditionalTransfer(const MachineInstr* MI) const; bool isConditionalALU32 (const MachineInstr* MI) const; bool isConditionalLoad (const MachineInstr* MI) const; + bool isConditionalStore(const MachineInstr* MI) const; bool isDeallocRet(const MachineInstr *MI) const; unsigned getInvertedPredicatedOpcode(const int Opc) const; + bool isExtendable(const MachineInstr* MI) const; + bool isExtended(const MachineInstr* MI) const; + bool isPostIncrement(const 
MachineInstr* MI) const; + bool isNewValueStore(const MachineInstr* MI) const; + bool isNewValueJump(const MachineInstr* MI) const; + bool isNewValueJumpCandidate(const MachineInstr *MI) const; + unsigned getImmExtForm(const MachineInstr* MI) const; + unsigned getNormalBranchForm(const MachineInstr* MI) const; private: int getMatchingCondBranchOpcode(int Opc, bool sense) const; diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td index b563ac3..c0c0df6 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.td +++ b/lib/Target/Hexagon/HexagonInstrInfo.td @@ -25,7 +25,10 @@ def HasV3TOnly : Predicate<"Subtarget.hasV3TOpsOnly()">; def NoV3T : Predicate<"!Subtarget.hasV3TOps()">; def HasV4T : Predicate<"Subtarget.hasV4TOps()">; def NoV4T : Predicate<"!Subtarget.hasV4TOps()">; +def HasV5T : Predicate<"Subtarget.hasV5TOps()">; +def NoV5T : Predicate<"!Subtarget.hasV5TOps()">; def UseMEMOP : Predicate<"Subtarget.useMemOps()">; +def IEEERndNearV5T : Predicate<"Subtarget.modeIEEERndNear()">; // Addressing modes. def ADDRrr : ComplexPattern<i32, 2, "SelectADDRrr", [], []>; @@ -84,10 +87,12 @@ def symbolLo32 : Operand<i32> { multiclass ALU32_rr_ri<string OpcStr, SDNode OpNode> { def rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set IntRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$b), + (i32 IntRegs:$c)))]>; def ri : ALU32_ri<(outs IntRegs:$dst), (ins s10Imm:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "(#$b, $c)")), - [(set IntRegs:$dst, (OpNode s10Imm:$b, IntRegs:$c))]>; + [(set (i32 IntRegs:$dst), (OpNode s10Imm:$b, + (i32 IntRegs:$c)))]>; } // Multi-class for compare ops. 
@@ -95,111 +100,114 @@ let isCompare = 1 in { multiclass CMP64_rr<string OpcStr, PatFrag OpNode> { def rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set PredRegs:$dst, (OpNode DoubleRegs:$b, DoubleRegs:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i64 DoubleRegs:$b), (i64 DoubleRegs:$c)))]>; } multiclass CMP32_rr<string OpcStr, PatFrag OpNode> { def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>; } multiclass CMP32_rr_ri_s10<string OpcStr, PatFrag OpNode> { def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>; def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s10Imm:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, s10ImmPred:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), s10ImmPred:$c))]>; } multiclass CMP32_rr_ri_u9<string OpcStr, PatFrag OpNode> { def rr : ALU32_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, IntRegs:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), (i32 IntRegs:$c)))]>; def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Imm:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, u9ImmPred:$c))]>; + [(set (i1 PredRegs:$dst), + (OpNode (i32 IntRegs:$b), u9ImmPred:$c))]>; } -multiclass CMP32_ri_u9<string OpcStr, PatFrag OpNode> { - def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u9Imm:$c), 
+multiclass CMP32_ri_u8<string OpcStr, PatFrag OpNode> { + def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, u8Imm:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, u9ImmPred:$c))]>; + [(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b), + u8ImmPred:$c))]>; } multiclass CMP32_ri_s8<string OpcStr, PatFrag OpNode> { def ri : ALU32_ri<(outs PredRegs:$dst), (ins IntRegs:$b, s8Imm:$c), !strconcat("$dst = ", !strconcat(OpcStr, "($b, #$c)")), - [(set PredRegs:$dst, (OpNode IntRegs:$b, s8ImmPred:$c))]>; + [(set (i1 PredRegs:$dst), (OpNode (i32 IntRegs:$b), + s8ImmPred:$c))]>; } } //===----------------------------------------------------------------------===// -// Instructions -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// http://qualnet.qualcomm.com/~erich/v1/htmldocs/index.html -// http://qualnet.qualcomm.com/~erich/v2/htmldocs/index.html -// http://qualnet.qualcomm.com/~erich/v3/htmldocs/index.html -// http://qualnet.qualcomm.com/~erich/v4/htmldocs/index.html -// http://qualnet.qualcomm.com/~erich/v5/htmldocs/index.html -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// // ALU32/ALU + //===----------------------------------------------------------------------===// // Add. 
-let isPredicable = 1 in +let isCommutable = 1, isPredicable = 1 in def ADD_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = add($src1, $src2)", - [(set IntRegs:$dst, (add IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; let isPredicable = 1 in def ADD_ri : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1, s16Imm:$src2), "$dst = add($src1, #$src2)", - [(set IntRegs:$dst, (add IntRegs:$src1, s16ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1), + s16ImmPred:$src2))]>; // Logical operations. let isPredicable = 1 in def XOR_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = xor($src1, $src2)", - [(set IntRegs:$dst, (xor IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; -let isPredicable = 1 in +let isCommutable = 1, isPredicable = 1 in def AND_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = and($src1, $src2)", - [(set IntRegs:$dst, (and IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; def OR_ri : ALU32_ri<(outs IntRegs:$dst), - (ins IntRegs:$src1, s8Imm:$src2), + (ins IntRegs:$src1, s10Imm:$src2), "$dst = or($src1, #$src2)", - [(set IntRegs:$dst, (or IntRegs:$src1, s8ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1), + s10ImmPred:$src2))]>; def NOT_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = not($src1)", - [(set IntRegs:$dst, (not IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), (not (i32 IntRegs:$src1)))]>; def AND_ri : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2), "$dst = and($src1, #$src2)", - [(set IntRegs:$dst, (and IntRegs:$src1, s10ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), + s10ImmPred:$src2))]>; -let isPredicable = 1 in +let isCommutable = 1, isPredicable = 1 in def 
OR_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = or($src1, $src2)", - [(set IntRegs:$dst, (or IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; // Negate. def NEG : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = neg($src1)", - [(set IntRegs:$dst, (ineg IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), (ineg (i32 IntRegs:$src1)))]>; // Nop. let neverHasSideEffects = 1 in def NOP : ALU32_rr<(outs), (ins), @@ -211,13 +219,20 @@ let isPredicable = 1 in def SUB_rr : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = sub($src1, $src2)", - [(set IntRegs:$dst, (sub IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; + +// Rd32=sub(#s10,Rs32) +def SUB_ri : ALU32_ri<(outs IntRegs:$dst), + (ins s10Imm:$src1, IntRegs:$src2), + "$dst = sub(#$src1, $src2)", + [(set IntRegs:$dst, (sub s10ImmPred:$src1, IntRegs:$src2))]>; // Transfer immediate. -let isReMaterializable = 1, isPredicable = 1 in +let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in def TFRI : ALU32_ri<(outs IntRegs:$dst), (ins s16Imm:$src1), "$dst = #$src1", - [(set IntRegs:$dst, s16ImmPred:$src1)]>; + [(set (i32 IntRegs:$dst), s16ImmPred:$src1)]>; // Transfer register. let neverHasSideEffects = 1, isPredicable = 1 in @@ -225,6 +240,11 @@ def TFR : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = $src1", []>; +let neverHasSideEffects = 1, isPredicable = 1 in +def TFR64 : ALU32_ri<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), + "$dst = $src1", + []>; + // Transfer control register. 
let neverHasSideEffects = 1 in def TFCR : CRInst<(outs CRRegs:$dst), (ins IntRegs:$src1), @@ -246,6 +266,12 @@ def COMBINE_rr : ALU32_rr<(outs DoubleRegs:$dst), "$dst = combine($src1, $src2)", []>; +let neverHasSideEffects = 1 in +def COMBINE_ii : ALU32_ii<(outs DoubleRegs:$dst), + (ins s8Imm:$src1, s8Imm:$src2), + "$dst = combine(#$src1, #$src2)", + []>; + // Mux. def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, DoubleRegs:$src2, @@ -256,48 +282,52 @@ def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, def MUX_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst = mux($src1, $src2, $src3)", - [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2, - IntRegs:$src3))]>; + [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1), + (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))]>; def MUX_ir : ALU32_ir<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2, IntRegs:$src3), "$dst = mux($src1, #$src2, $src3)", - [(set IntRegs:$dst, (select PredRegs:$src1, - s8ImmPred:$src2, IntRegs:$src3))]>; + [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1), + s8ImmPred:$src2, + (i32 IntRegs:$src3))))]>; def MUX_ri : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), "$dst = mux($src1, $src2, #$src3)", - [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2, - s8ImmPred:$src3))]>; + [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1), + (i32 IntRegs:$src2), + s8ImmPred:$src3)))]>; def MUX_ii : ALU32_ii<(outs IntRegs:$dst), (ins PredRegs:$src1, s8Imm:$src2, s8Imm:$src3), "$dst = mux($src1, #$src2, #$src3)", - [(set IntRegs:$dst, (select PredRegs:$src1, s8ImmPred:$src2, - s8ImmPred:$src3))]>; + [(set (i32 IntRegs:$dst), (i32 (select (i1 PredRegs:$src1), + s8ImmPred:$src2, + s8ImmPred:$src3)))]>; // Shift halfword. 
let isPredicable = 1 in def ASLH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = aslh($src1)", - [(set IntRegs:$dst, (shl 16, IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), (shl 16, (i32 IntRegs:$src1)))]>; let isPredicable = 1 in def ASRH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = asrh($src1)", - [(set IntRegs:$dst, (sra 16, IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), (sra 16, (i32 IntRegs:$src1)))]>; // Sign extend. let isPredicable = 1 in def SXTB : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = sxtb($src1)", - [(set IntRegs:$dst, (sext_inreg IntRegs:$src1, i8))]>; + [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i8))]>; let isPredicable = 1 in def SXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = sxth($src1)", - [(set IntRegs:$dst, (sext_inreg IntRegs:$src1, i16))]>; + [(set (i32 IntRegs:$dst), (sext_inreg (i32 IntRegs:$src1), i16))]>; // Zero extend. let isPredicable = 1, neverHasSideEffects = 1 in @@ -321,25 +351,25 @@ def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), // Conditional add. 
let neverHasSideEffects = 1, isPredicated = 1 in def ADD_ri_cPt : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3), + (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), "if ($src1) $dst = add($src2, #$src3)", []>; let neverHasSideEffects = 1, isPredicated = 1 in def ADD_ri_cNotPt : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3), + (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), "if (!$src1) $dst = add($src2, #$src3)", []>; let neverHasSideEffects = 1, isPredicated = 1 in def ADD_ri_cdnPt : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3), + (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), "if ($src1.new) $dst = add($src2, #$src3)", []>; let neverHasSideEffects = 1, isPredicated = 1 in def ADD_ri_cdnNotPt : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s16Imm:$src3), + (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), "if (!$src1.new) $dst = add($src2, #$src3)", []>; @@ -497,7 +527,6 @@ def SUB_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), // Conditional transfer. 
- let neverHasSideEffects = 1, isPredicated = 1 in def TFR_cPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2), "if ($src1) $dst = $src2", @@ -510,6 +539,18 @@ def TFR_cNotPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, []>; let neverHasSideEffects = 1, isPredicated = 1 in +def TFR64_cPt : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, + DoubleRegs:$src2), + "if ($src1) $dst = $src2", + []>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def TFR64_cNotPt : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, + DoubleRegs:$src2), + "if (!$src1) $dst = $src2", + []>; + +let neverHasSideEffects = 1, isPredicated = 1 in def TFRI_cPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2), "if ($src1) $dst = #$src2", []>; @@ -548,25 +589,14 @@ def TFRI_cdnNotPt : ALU32_ri<(outs IntRegs:$dst), (ins PredRegs:$src1, defm CMPGTU : CMP32_rr_ri_u9<"cmp.gtu", setugt>; defm CMPGT : CMP32_rr_ri_s10<"cmp.gt", setgt>; defm CMPLT : CMP32_rr<"cmp.lt", setlt>; +defm CMPLTU : CMP32_rr<"cmp.ltu", setult>; defm CMPEQ : CMP32_rr_ri_s10<"cmp.eq", seteq>; defm CMPGE : CMP32_ri_s8<"cmp.ge", setge>; -defm CMPGEU : CMP32_ri_u9<"cmp.geu", setuge>; +defm CMPGEU : CMP32_ri_u8<"cmp.geu", setuge>; //===----------------------------------------------------------------------===// // ALU32/PRED - //===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// ALU32/VH + -//===----------------------------------------------------------------------===// -// Vector add halfwords - -// Vector averagehalfwords - -// Vector subtract halfwords -//===----------------------------------------------------------------------===// -// ALU32/VH - -//===----------------------------------------------------------------------===// - //===----------------------------------------------------------------------===// // ALU64/ALU + @@ -575,8 +605,8 @@ defm CMPGEU : 
CMP32_ri_u9<"cmp.geu", setuge>; def ADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = add($src1, $src2)", - [(set DoubleRegs:$dst, (add DoubleRegs:$src1, - DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (add (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; // Add halfword. @@ -589,40 +619,93 @@ defm CMPGTU64 : CMP64_rr<"cmp.gtu", setugt>; def AND_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = and($src1, $src2)", - [(set DoubleRegs:$dst, (and DoubleRegs:$src1, - DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; def OR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = or($src1, $src2)", - [(set DoubleRegs:$dst, (or DoubleRegs:$src1, DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (or (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; def XOR_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = xor($src1, $src2)", - [(set DoubleRegs:$dst, (xor DoubleRegs:$src1, - DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; // Maximum. 
def MAXw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = max($src2, $src1)", - [(set IntRegs:$dst, (select (i1 (setlt IntRegs:$src2, - IntRegs:$src1)), - IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 (setlt (i32 IntRegs:$src2), + (i32 IntRegs:$src1))), + (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>; + +def MAXUw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), + "$dst = maxu($src2, $src1)", + [(set (i32 IntRegs:$dst), + (i32 (select (i1 (setult (i32 IntRegs:$src2), + (i32 IntRegs:$src1))), + (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>; + +def MAXd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = max($src2, $src1)", + [(set (i64 DoubleRegs:$dst), + (i64 (select (i1 (setlt (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src1))), + (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2))))]>; + +def MAXUd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = maxu($src2, $src1)", + [(set (i64 DoubleRegs:$dst), + (i64 (select (i1 (setult (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src1))), + (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2))))]>; // Minimum. 
def MINw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = min($src2, $src1)", - [(set IntRegs:$dst, (select (i1 (setgt IntRegs:$src2, - IntRegs:$src1)), - IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 (setgt (i32 IntRegs:$src2), + (i32 IntRegs:$src1))), + (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>; + +def MINUw_rr : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), + "$dst = minu($src2, $src1)", + [(set (i32 IntRegs:$dst), + (i32 (select (i1 (setugt (i32 IntRegs:$src2), + (i32 IntRegs:$src1))), + (i32 IntRegs:$src1), (i32 IntRegs:$src2))))]>; + +def MINd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = min($src2, $src1)", + [(set (i64 DoubleRegs:$dst), + (i64 (select (i1 (setgt (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src1))), + (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2))))]>; + +def MINUd_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = minu($src2, $src1)", + [(set (i64 DoubleRegs:$dst), + (i64 (select (i1 (setugt (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src1))), + (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2))))]>; // Subtract. def SUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = sub($src1, $src2)", - [(set DoubleRegs:$dst, (sub DoubleRegs:$src1, - DoubleRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (sub (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))]>; // Subtract halfword. 
@@ -652,30 +735,6 @@ def TFR_64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// -// ALU64/VB + -//===----------------------------------------------------------------------===// -// -//===----------------------------------------------------------------------===// -// ALU64/VB - -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// ALU64/VH + -//===----------------------------------------------------------------------===// -// -//===----------------------------------------------------------------------===// -// ALU64/VH - -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// -// ALU64/VW + -//===----------------------------------------------------------------------===// -// -//===----------------------------------------------------------------------===// -// ALU64/VW - -//===----------------------------------------------------------------------===// - -//===----------------------------------------------------------------------===// // CR + //===----------------------------------------------------------------------===// // Logical reductions on predicates. @@ -687,7 +746,8 @@ def TFR_64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), // Logical operations on predicates. 
def AND_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2), "$dst = and($src1, $src2)", - [(set PredRegs:$dst, (and PredRegs:$src1, PredRegs:$src2))]>; + [(set (i1 PredRegs:$dst), (and (i1 PredRegs:$src1), + (i1 PredRegs:$src2)))]>; let neverHasSideEffects = 1 in def AND_pnotp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, @@ -726,15 +786,17 @@ def MASK_p : SInst<(outs DoubleRegs:$dst), (ins PredRegs:$src1), def NOT_p : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1), "$dst = not($src1)", - [(set PredRegs:$dst, (not PredRegs:$src1))]>; + [(set (i1 PredRegs:$dst), (not (i1 PredRegs:$src1)))]>; def OR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2), "$dst = or($src1, $src2)", - [(set PredRegs:$dst, (or PredRegs:$src1, PredRegs:$src2))]>; + [(set (i1 PredRegs:$dst), (or (i1 PredRegs:$src1), + (i1 PredRegs:$src2)))]>; def XOR_pp : SInst<(outs PredRegs:$dst), (ins PredRegs:$src1, PredRegs:$src2), "$dst = xor($src1, $src2)", - [(set PredRegs:$dst, (xor PredRegs:$src1, PredRegs:$src2))]>; + [(set (i1 PredRegs:$dst), (xor (i1 PredRegs:$src1), + (i1 PredRegs:$src2)))]>; // User control register transfer. @@ -760,7 +822,7 @@ let isBranch = 1, isTerminator=1, Defs = [PC], def JMP_c : JInst< (outs), (ins PredRegs:$src, brtarget:$offset), "if ($src) jump $offset", - [(brcond PredRegs:$src, bb:$offset)]>; + [(brcond (i1 PredRegs:$src), bb:$offset)]>; } // if (!p0) jump @@ -826,7 +888,7 @@ def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone, [SDNPHasChain, SDNPOptInGlue]>; // Jump to address from register. -let isReturn = 1, isTerminator = 1, isBarrier = 1, +let isPredicable =1, isReturn = 1, isTerminator = 1, isBarrier = 1, Defs = [PC], Uses = [R31] in { def JMPR: JRInst<(outs), (ins), "jumpr r31", @@ -834,7 +896,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, } // Jump to address from register. 
-let isReturn = 1, isTerminator = 1, isBarrier = 1, +let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1, Defs = [PC], Uses = [R31] in { def JMPR_cPt: JRInst<(outs), (ins PredRegs:$src1), "if ($src1) jumpr r31", @@ -842,7 +904,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, } // Jump to address from register. -let isReturn = 1, isTerminator = 1, isBarrier = 1, +let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicated = 1, Defs = [PC], Uses = [R31] in { def JMPR_cNotPt: JRInst<(outs), (ins PredRegs:$src1), "if (!$src1) jumpr r31", @@ -865,96 +927,99 @@ let isPredicable = 1 in def LDrid : LDInst<(outs DoubleRegs:$dst), (ins MEMri:$addr), "$dst = memd($addr)", - [(set DoubleRegs:$dst, (load ADDRriS11_3:$addr))]>; + [(set (i64 DoubleRegs:$dst), (i64 (load ADDRriS11_3:$addr)))]>; let isPredicable = 1, AddedComplexity = 20 in def LDrid_indexed : LDInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, s11_3Imm:$offset), - "$dst=memd($src1+#$offset)", - [(set DoubleRegs:$dst, (load (add IntRegs:$src1, - s11_3ImmPred:$offset)))]>; + "$dst = memd($src1+#$offset)", + [(set (i64 DoubleRegs:$dst), + (i64 (load (add (i32 IntRegs:$src1), + s11_3ImmPred:$offset))))]>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_GP : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1 in +def LDrid_GP : LDInst2<(outs DoubleRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memd(#$global+$offset)", - []>; + "$dst = memd(#$global+$offset)", + []>, + Requires<[NoV4T]>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDd_GP : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1 in +def LDd_GP : LDInst2<(outs DoubleRegs:$dst), (ins globaladdress:$global), - "$dst=memd(#$global)", - []>; + "$dst = memd(#$global)", + []>, + Requires<[NoV4T]>; -let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrid : LDInstPI<(outs DoubleRegs:$dst, IntRegs:$dst2), +let isPredicable = 1, hasCtrlDep = 1, 
neverHasSideEffects = 1 in +def POST_LDrid : LDInst2PI<(outs DoubleRegs:$dst, IntRegs:$dst2), (ins IntRegs:$src1, s4Imm:$offset), "$dst = memd($src1++#$offset)", [], "$src1 = $dst2">; // Load doubleword conditionally. -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_cPt : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_cPt : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memd($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_cNotPt : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_cNotPt : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memd($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_indexed_cPt : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_indexed_cPt : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3), - "if ($src1) $dst=memd($src2+#$src3)", + "if ($src1) $dst = memd($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_indexed_cNotPt : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_indexed_cNotPt : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3), - "if (!$src1) $dst=memd($src2+#$src3)", + "if (!$src1) $dst = memd($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrid_cPt : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrid_cPt : LDInst2PI<(outs DoubleRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3), "if ($src1) $dst1 = memd($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrid_cNotPt : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, 
neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrid_cNotPt : LDInst2PI<(outs DoubleRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3), "if (!$src1) $dst1 = memd($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_cdnPt : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_cdnPt : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memd($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_cdnNotPt : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_cdnNotPt : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memd($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_indexed_cdnPt : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_indexed_cdnPt : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3), - "if ($src1.new) $dst=memd($src2+#$src3)", + "if ($src1.new) $dst = memd($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_indexed_cdnNotPt : LDInst<(outs DoubleRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_indexed_cdnNotPt : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3), - "if (!$src1.new) $dst=memd($src2+#$src3)", + "if (!$src1.new) $dst = memd($src2+#$src3)", []>; @@ -963,114 +1028,113 @@ let isPredicable = 1 in def LDrib : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memb($addr)", - [(set IntRegs:$dst, (sextloadi8 ADDRriS11_0:$addr))]>; + [(set (i32 IntRegs:$dst), (i32 (sextloadi8 ADDRriS11_0:$addr)))]>; -def LDrib_ae : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memb($addr)", - [(set IntRegs:$dst, (extloadi8 ADDRriS11_0:$addr))]>; +// Load byte any-extend. 
+def : Pat < (i32 (extloadi8 ADDRriS11_0:$addr)), + (i32 (LDrib ADDRriS11_0:$addr)) >; // Indexed load byte. let isPredicable = 1, AddedComplexity = 20 in def LDrib_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memb($src1+#$offset)", - [(set IntRegs:$dst, (sextloadi8 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; - + "$dst = memb($src1+#$offset)", + [(set (i32 IntRegs:$dst), + (i32 (sextloadi8 (add (i32 IntRegs:$src1), + s11_0ImmPred:$offset))))]>; // Indexed load byte any-extend. let AddedComplexity = 20 in -def LDrib_ae_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memb($src1+#$offset)", - [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; +def : Pat < (i32 (extloadi8 (add IntRegs:$src1, s11_0ImmPred:$offset))), + (i32 (LDrib_indexed IntRegs:$src1, s11_0ImmPred:$offset)) >; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDrib_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memb(#$global+$offset)", - []>; + "$dst = memb(#$global+$offset)", + []>, + Requires<[NoV4T]>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDb_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDb_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memb(#$global)", - []>; + "$dst = memb(#$global)", + []>, + Requires<[NoV4T]>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDub_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDub_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memub(#$global)", - []>; + "$dst = memub(#$global)", + []>, + Requires<[NoV4T]>; -let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrib : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), +let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +def POST_LDrib : 
LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2), (ins IntRegs:$src1, s4Imm:$offset), "$dst = memb($src1++#$offset)", [], "$src1 = $dst2">; // Load byte conditionally. -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memb($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memb($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_indexed_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_indexed_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if ($src1) $dst = memb($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_indexed_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_indexed_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if (!$src1) $dst = memb($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrib_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrib_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if ($src1) $dst1 = memb($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrib_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrib_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if 
(!$src1) $dst1 = memb($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_cdnPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memb($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memb($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_indexed_cdnPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_indexed_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if ($src1.new) $dst = memb($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if (!$src1.new) $dst = memb($src2+#$src3)", []>; @@ -1081,112 +1145,110 @@ let isPredicable = 1 in def LDrih : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memh($addr)", - [(set IntRegs:$dst, (sextloadi16 ADDRriS11_1:$addr))]>; + [(set (i32 IntRegs:$dst), (i32 (sextloadi16 ADDRriS11_1:$addr)))]>; let isPredicable = 1, AddedComplexity = 20 in def LDrih_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_1Imm:$offset), - "$dst=memh($src1+#$offset)", - [(set IntRegs:$dst, (sextloadi16 (add IntRegs:$src1, - s11_1ImmPred:$offset)))] >; + "$dst = memh($src1+#$offset)", + [(set (i32 IntRegs:$dst), + (i32 (sextloadi16 (add (i32 IntRegs:$src1), + s11_1ImmPred:$offset))))]>; -def LDrih_ae : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memh($addr)", - [(set IntRegs:$dst, (extloadi16 
ADDRriS11_1:$addr))]>; +def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)), + (i32 (LDrih ADDRriS11_1:$addr))>; let AddedComplexity = 20 in -def LDrih_ae_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_1Imm:$offset), - "$dst=memh($src1+#$offset)", - [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1, - s11_1ImmPred:$offset)))] >; +def : Pat < (i32 (extloadi16 (add IntRegs:$src1, s11_1ImmPred:$offset))), + (i32 (LDrih_indexed IntRegs:$src1, s11_1ImmPred:$offset)) >; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDrih_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memh(#$global+$offset)", - []>; + "$dst = memh(#$global+$offset)", + []>, + Requires<[NoV4T]>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDh_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDh_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memh(#$global)", - []>; + "$dst = memh(#$global)", + []>, + Requires<[NoV4T]>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDuh_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDuh_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memuh(#$global)", - []>; - + "$dst = memuh(#$global)", + []>, + Requires<[NoV4T]>; -let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrih : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), +let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +def POST_LDrih : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2), (ins IntRegs:$src1, s4Imm:$offset), "$dst = memh($src1++#$offset)", [], "$src1 = $dst2">; // Load halfword conditionally. 
-let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_indexed_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_indexed_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if ($src1) $dst = memh($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_indexed_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_indexed_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if (!$src1) $dst = memh($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrih_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrih_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if ($src1) $dst1 = memh($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDrih_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrih_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if (!$src1) $dst1 = memh($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_cdnPt : LDInst<(outs IntRegs:$dst), +let 
neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_indexed_cdnPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_indexed_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if ($src1.new) $dst = memh($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if (!$src1.new) $dst = memh($src2+#$src3)", []>; @@ -1196,113 +1258,96 @@ let isPredicable = 1 in def LDriub : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memub($addr)", - [(set IntRegs:$dst, (zextloadi8 ADDRriS11_0:$addr))]>; + [(set (i32 IntRegs:$dst), (i32 (zextloadi8 ADDRriS11_0:$addr)))]>; -let isPredicable = 1 in -def LDriubit : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memub($addr)", - [(set IntRegs:$dst, (zextloadi1 ADDRriS11_0:$addr))]>; +def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)), + (i32 (LDriub ADDRriS11_0:$addr))>; let isPredicable = 1, AddedComplexity = 20 in def LDriub_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memub($src1+#$offset)", - [(set IntRegs:$dst, (zextloadi8 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; - -let AddedComplexity = 20 in -def LDriubit_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memub($src1+#$offset)", - [(set 
IntRegs:$dst, (zextloadi1 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; - -def LDriub_ae : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memub($addr)", - [(set IntRegs:$dst, (extloadi8 ADDRriS11_0:$addr))]>; - + "$dst = memub($src1+#$offset)", + [(set (i32 IntRegs:$dst), + (i32 (zextloadi8 (add (i32 IntRegs:$src1), + s11_0ImmPred:$offset))))]>; let AddedComplexity = 20 in -def LDriub_ae_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_0Imm:$offset), - "$dst=memub($src1+#$offset)", - [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1, - s11_0ImmPred:$offset)))]>; +def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))), + (i32 (LDriub_indexed IntRegs:$src1, s11_0ImmPred:$offset))>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDriub_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memub(#$global+$offset)", - []>; + "$dst = memub(#$global+$offset)", + []>, + Requires<[NoV4T]>; -let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriub : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), +let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +def POST_LDriub : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2), (ins IntRegs:$src1, s4Imm:$offset), "$dst = memub($src1++#$offset)", [], "$src1 = $dst2">; // Load unsigned byte conditionally. 
-let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memub($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memub($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_indexed_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_indexed_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if ($src1) $dst = memub($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_indexed_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_indexed_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if (!$src1) $dst = memub($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriub_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriub_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if ($src1) $dst1 = memub($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriub_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriub_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if (!$src1) $dst1 = memub($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_cdnPt : LDInst<(outs 
IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memub($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memub($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_indexed_cdnPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_indexed_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if ($src1.new) $dst = memub($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3), "if (!$src1.new) $dst = memub($src2+#$src3)", []>; @@ -1312,102 +1357,90 @@ let isPredicable = 1 in def LDriuh : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memuh($addr)", - [(set IntRegs:$dst, (zextloadi16 ADDRriS11_1:$addr))]>; + [(set (i32 IntRegs:$dst), (i32 (zextloadi16 ADDRriS11_1:$addr)))]>; // Indexed load unsigned halfword. let isPredicable = 1, AddedComplexity = 20 in def LDriuh_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_1Imm:$offset), - "$dst=memuh($src1+#$offset)", - [(set IntRegs:$dst, (zextloadi16 (add IntRegs:$src1, - s11_1ImmPred:$offset)))]>; - -def LDriuh_ae : LDInst<(outs IntRegs:$dst), - (ins MEMri:$addr), - "$dst = memuh($addr)", - [(set IntRegs:$dst, (extloadi16 ADDRriS11_1:$addr))]>; + "$dst = memuh($src1+#$offset)", + [(set (i32 IntRegs:$dst), + (i32 (zextloadi16 (add (i32 IntRegs:$src1), + s11_1ImmPred:$offset))))]>; - -// Indexed load unsigned halfword any-extend. 
-let AddedComplexity = 20 in -def LDriuh_ae_indexed : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, s11_1Imm:$offset), - "$dst=memuh($src1+#$offset)", - [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1, - s11_1ImmPred:$offset)))] >; - -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDriuh_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memuh(#$global+$offset)", - []>; + "$dst = memuh(#$global+$offset)", + []>, + Requires<[NoV4T]>; -let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriuh : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), +let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +def POST_LDriuh : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2), (ins IntRegs:$src1, s4Imm:$offset), "$dst = memuh($src1++#$offset)", [], "$src1 = $dst2">; // Load unsigned halfword conditionally. -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memuh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memuh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_indexed_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_indexed_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if ($src1) $dst = memuh($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_indexed_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_indexed_cNotPt : LDInst2<(outs 
IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if (!$src1) $dst = memuh($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriuh_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriuh_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if ($src1) $dst1 = memuh($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriuh_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriuh_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if (!$src1) $dst1 = memuh($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_cdnPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memuh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memuh($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_indexed_cdnPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_indexed_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3), "if ($src1.new) $dst = memuh($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, 
u6_1Imm:$src3), "if (!$src1.new) $dst = memuh($src2+#$src3)", []>; @@ -1417,10 +1450,10 @@ def LDriuh_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), let isPredicable = 1 in def LDriw : LDInst<(outs IntRegs:$dst), (ins MEMri:$addr), "$dst = memw($addr)", - [(set IntRegs:$dst, (load ADDRriS11_2:$addr))]>; + [(set IntRegs:$dst, (i32 (load ADDRriS11_2:$addr)))]>; // Load predicate. -let mayLoad = 1, Defs = [R10,R11] in +let Defs = [R10,R11,D5], neverHasSideEffects = 1 in def LDriw_pred : LDInst<(outs PredRegs:$dst), (ins MEMri:$addr), "Error; should not emit", @@ -1430,24 +1463,26 @@ def LDriw_pred : LDInst<(outs PredRegs:$dst), let isPredicable = 1, AddedComplexity = 20 in def LDriw_indexed : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s11_2Imm:$offset), - "$dst=memw($src1+#$offset)", - [(set IntRegs:$dst, (load (add IntRegs:$src1, - s11_2ImmPred:$offset)))]>; + "$dst = memw($src1+#$offset)", + [(set IntRegs:$dst, (i32 (load (add IntRegs:$src1, + s11_2ImmPred:$offset))))]>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDriw_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global, u16Imm:$offset), - "$dst=memw(#$global+$offset)", - []>; + "$dst = memw(#$global+$offset)", + []>, + Requires<[NoV4T]>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDw_GP : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1 in +def LDw_GP : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst=memw(#$global)", - []>; + "$dst = memw(#$global)", + []>, + Requires<[NoV4T]>; -let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriw : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2), +let isPredicable = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in +def POST_LDriw : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2), (ins IntRegs:$src1, s4Imm:$offset), "$dst = memw($src1++#$offset)", [], @@ -1455,71 +1490,71 @@ def POST_LDriw : LDInstPI<(outs IntRegs:$dst, 
IntRegs:$dst2), // Load word conditionally. -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1) $dst = memw($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1) $dst = memw($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_indexed_cPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_indexed_cPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3), - "if ($src1) $dst=memw($src2+#$src3)", + "if ($src1) $dst = memw($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_indexed_cNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_indexed_cNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3), - "if (!$src1) $dst=memw($src2+#$src3)", + "if (!$src1) $dst = memw($src2+#$src3)", []>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriw_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriw_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3), "if ($src1) $dst1 = memw($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in -def POST_LDriw_cNotPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriw_cNotPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3), "if (!$src1) $dst1 = 
memw($src2++#$src3)", [], "$src2 = $dst2">; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_cdnPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if ($src1.new) $dst = memw($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, MEMri:$addr), "if (!$src1.new) $dst = memw($addr)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_indexed_cdnPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_indexed_cdnPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3), - "if ($src1.new) $dst=memw($src2+#$src3)", + "if ($src1.new) $dst = memw($src2+#$src3)", []>; -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst), +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3), - "if (!$src1.new) $dst=memw($src2+#$src3)", + "if (!$src1.new) $dst = memw($src2+#$src3)", []>; // Deallocate stack frame. 
let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in { - def DEALLOCFRAME : LDInst<(outs), (ins i32imm:$amt1), + def DEALLOCFRAME : LDInst2<(outs), (ins i32imm:$amt1), "deallocframe", []>; } @@ -1550,13 +1585,14 @@ let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in { // Rd=+mpyi(Rs,#u8) def MPYI_riu : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2), "$dst =+ mpyi($src1, #$src2)", - [(set IntRegs:$dst, (mul IntRegs:$src1, u8ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), + u8ImmPred:$src2))]>; // Rd=-mpyi(Rs,#u8) def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2), "$dst =- mpyi($src1, #$src2)", - [(set IntRegs:$dst, - (mul IntRegs:$src1, n8ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), + n8ImmPred:$src2))]>; // Rd=mpyi(Rs,#m9) // s9 is NOT the same as m9 - but it works.. so far. @@ -1564,35 +1600,40 @@ def MPYI_rin : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, n8Imm:$src2), // depending on the value of m9. See Arch Spec. 
def MPYI_ri : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Imm:$src2), "$dst = mpyi($src1, #$src2)", - [(set IntRegs:$dst, (mul IntRegs:$src1, s9ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), + s9ImmPred:$src2))]>; // Rd=mpyi(Rs,Rt) def MPYI : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpyi($src1, $src2)", - [(set IntRegs:$dst, (mul IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; // Rx+=mpyi(Rs,#u8) def MPYI_acc_ri : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3), "$dst += mpyi($src2, #$src3)", - [(set IntRegs:$dst, - (add (mul IntRegs:$src2, u8ImmPred:$src3), IntRegs:$src1))], + [(set (i32 IntRegs:$dst), + (add (mul (i32 IntRegs:$src2), u8ImmPred:$src3), + (i32 IntRegs:$src1)))], "$src1 = $dst">; // Rx+=mpyi(Rs,Rt) def MPYI_acc_rr : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += mpyi($src2, $src3)", - [(set IntRegs:$dst, - (add (mul IntRegs:$src2, IntRegs:$src3), IntRegs:$src1))], + [(set (i32 IntRegs:$dst), + (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)), + (i32 IntRegs:$src1)))], "$src1 = $dst">; // Rx-=mpyi(Rs,#u8) def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u8Imm:$src3), "$dst -= mpyi($src2, #$src3)", - [(set IntRegs:$dst, - (sub IntRegs:$src1, (mul IntRegs:$src2, u8ImmPred:$src3)))], + [(set (i32 IntRegs:$dst), + (sub (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2), + u8ImmPred:$src3)))], "$src1 = $dst">; // Multiply and use upper result. 
@@ -1601,27 +1642,30 @@ def MPYI_sub_ri : MInst_acc<(outs IntRegs:$dst), // Rd=mpy(Rs,Rt) def MPY : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpy($src1, $src2)", - [(set IntRegs:$dst, (mulhs IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (mulhs (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; // Rd=mpy(Rs,Rt):rnd // Rd=mpyu(Rs,Rt) def MPYU : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpyu($src1, $src2)", - [(set IntRegs:$dst, (mulhu IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (mulhu (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; // Multiply and use full result. // Rdd=mpyu(Rs,Rt) def MPYU64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpyu($src1, $src2)", - [(set DoubleRegs:$dst, (mul (i64 (anyext IntRegs:$src1)), - (i64 (anyext IntRegs:$src2))))]>; + [(set (i64 DoubleRegs:$dst), + (mul (i64 (anyext (i32 IntRegs:$src1))), + (i64 (anyext (i32 IntRegs:$src2)))))]>; // Rdd=mpy(Rs,Rt) def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = mpy($src1, $src2)", - [(set DoubleRegs:$dst, (mul (i64 (sext IntRegs:$src1)), - (i64 (sext IntRegs:$src2))))]>; - + [(set (i64 DoubleRegs:$dst), + (mul (i64 (sext (i32 IntRegs:$src1))), + (i64 (sext (i32 IntRegs:$src2)))))]>; // Multiply and accumulate, use full result. 
// Rxx[+-]=mpy(Rs,Rt) @@ -1629,18 +1673,20 @@ def MPY64 : MInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), def MPY64_acc : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += mpy($src2, $src3)", - [(set DoubleRegs:$dst, - (add (mul (i64 (sext IntRegs:$src2)), (i64 (sext IntRegs:$src3))), - DoubleRegs:$src1))], + [(set (i64 DoubleRegs:$dst), + (add (mul (i64 (sext (i32 IntRegs:$src2))), + (i64 (sext (i32 IntRegs:$src3)))), + (i64 DoubleRegs:$src1)))], "$src1 = $dst">; // Rxx-=mpy(Rs,Rt) def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst -= mpy($src2, $src3)", - [(set DoubleRegs:$dst, - (sub DoubleRegs:$src1, - (mul (i64 (sext IntRegs:$src2)), (i64 (sext IntRegs:$src3)))))], + [(set (i64 DoubleRegs:$dst), + (sub (i64 DoubleRegs:$src1), + (mul (i64 (sext (i32 IntRegs:$src2))), + (i64 (sext (i32 IntRegs:$src3))))))], "$src1 = $dst">; // Rxx[+-]=mpyu(Rs,Rt) @@ -1648,47 +1694,52 @@ def MPY64_sub : MInst_acc<(outs DoubleRegs:$dst), def MPYU64_acc : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += mpyu($src2, $src3)", - [(set DoubleRegs:$dst, (add (mul (i64 (anyext IntRegs:$src2)), - (i64 (anyext IntRegs:$src3))), - DoubleRegs:$src1))],"$src1 = $dst">; + [(set (i64 DoubleRegs:$dst), + (add (mul (i64 (anyext (i32 IntRegs:$src2))), + (i64 (anyext (i32 IntRegs:$src3)))), + (i64 DoubleRegs:$src1)))], "$src1 = $dst">; // Rxx-=mpyu(Rs,Rt) def MPYU64_sub : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += mpyu($src2, $src3)", - [(set DoubleRegs:$dst, - (sub DoubleRegs:$src1, - (mul (i64 (anyext IntRegs:$src2)), - (i64 (anyext IntRegs:$src3)))))], + [(set (i64 DoubleRegs:$dst), + (sub (i64 DoubleRegs:$src1), + (mul (i64 (anyext (i32 IntRegs:$src2))), + (i64 (anyext (i32 IntRegs:$src3))))))], "$src1 = $dst">; def ADDrr_acc : MInst_acc<(outs IntRegs: $dst), (ins 
IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst += add($src2, $src3)", - [(set IntRegs:$dst, (add (add IntRegs:$src2, IntRegs:$src3), - IntRegs:$src1))], + [(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2), + (i32 IntRegs:$src3)), + (i32 IntRegs:$src1)))], "$src1 = $dst">; def ADDri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1, IntRegs:$src2, s8Imm:$src3), "$dst += add($src2, #$src3)", - [(set IntRegs:$dst, (add (add IntRegs:$src2, s8ImmPred:$src3), - IntRegs:$src1))], + [(set (i32 IntRegs:$dst), (add (add (i32 IntRegs:$src2), + s8ImmPred:$src3), + (i32 IntRegs:$src1)))], "$src1 = $dst">; def SUBrr_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst -= add($src2, $src3)", - [(set IntRegs:$dst, (sub IntRegs:$src1, (add IntRegs:$src2, - IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (sub (i32 IntRegs:$src1), (add (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">; def SUBri_acc : MInst_acc<(outs IntRegs: $dst), (ins IntRegs:$src1, IntRegs:$src2, s8Imm:$src3), "$dst -= add($src2, #$src3)", - [(set IntRegs:$dst, (sub IntRegs:$src1, - (add IntRegs:$src2, s8ImmPred:$src3)))], + [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1), + (add (i32 IntRegs:$src2), + s8ImmPred:$src3)))], "$src1 = $dst">; //===----------------------------------------------------------------------===// @@ -1731,57 +1782,70 @@ let isPredicable = 1 in def STrid : STInst<(outs), (ins MEMri:$addr, DoubleRegs:$src1), "memd($addr) = $src1", - [(store DoubleRegs:$src1, ADDRriS11_3:$addr)]>; + [(store (i64 DoubleRegs:$src1), ADDRriS11_3:$addr)]>; // Indexed store double word. 
let AddedComplexity = 10, isPredicable = 1 in def STrid_indexed : STInst<(outs), (ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3), "memd($src1+#$src2) = $src3", - [(store DoubleRegs:$src3, - (add IntRegs:$src1, s11_3ImmPred:$src2))]>; + [(store (i64 DoubleRegs:$src3), + (add (i32 IntRegs:$src1), s11_3ImmPred:$src2))]>; -let mayStore = 1, neverHasSideEffects = 1 in -def STrid_GP : STInst<(outs), +let neverHasSideEffects = 1 in +def STrid_GP : STInst2<(outs), (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src), "memd(#$global+$offset) = $src", - []>; + []>, + Requires<[NoV4T]>; + +let neverHasSideEffects = 1 in +def STd_GP : STInst2<(outs), + (ins globaladdress:$global, DoubleRegs:$src), + "memd(#$global) = $src", + []>, + Requires<[NoV4T]>; let hasCtrlDep = 1, isPredicable = 1 in def POST_STdri : STInstPI<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2, s4Imm:$offset), "memd($src2++#$offset) = $src1", [(set IntRegs:$dst, - (post_store DoubleRegs:$src1, IntRegs:$src2, s4_3ImmPred:$offset))], + (post_store (i64 DoubleRegs:$src1), (i32 IntRegs:$src2), + s4_3ImmPred:$offset))], "$src2 = $dst">; // Store doubleword conditionally. 
// if ([!]Pv) memd(Rs+#u6:3)=Rtt // if (Pv) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_cPt : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_cPt : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2), "if ($src1) memd($addr) = $src2", []>; // if (!Pv) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_cNotPt : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_cNotPt : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2), "if (!$src1) memd($addr) = $src2", []>; // if (Pv) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_indexed_cPt : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_indexed_cPt : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3, DoubleRegs:$src4), "if ($src1) memd($src2+#$src3) = $src4", []>; // if (!Pv) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_indexed_cNotPt : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_indexed_cNotPt : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3, DoubleRegs:$src4), "if (!$src1) memd($src2+#$src3) = $src4", @@ -1789,8 +1853,9 @@ def STrid_indexed_cNotPt : STInst<(outs), // if ([!]Pv) memd(Rx++#s4:3)=Rtt // if (Pv) memd(Rx++#s4:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def POST_STdri_cPt : STInstPI<(outs IntRegs:$dst), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def POST_STdri_cPt : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3, s4_3Imm:$offset), "if ($src1) memd($src3++#$offset) = $src2", @@ -1798,9 +1863,9 @@ def POST_STdri_cPt : STInstPI<(outs 
IntRegs:$dst), "$src3 = $dst">; // if (!Pv) memd(Rx++#s4:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1, +let AddedComplexity = 10, neverHasSideEffects = 1, isPredicated = 1 in -def POST_STdri_cNotPt : STInstPI<(outs IntRegs:$dst), +def POST_STdri_cNotPt : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3, s4_3Imm:$offset), "if (!$src1) memd($src3++#$offset) = $src2", @@ -1814,27 +1879,30 @@ let isPredicable = 1 in def STrib : STInst<(outs), (ins MEMri:$addr, IntRegs:$src1), "memb($addr) = $src1", - [(truncstorei8 IntRegs:$src1, ADDRriS11_0:$addr)]>; + [(truncstorei8 (i32 IntRegs:$src1), ADDRriS11_0:$addr)]>; let AddedComplexity = 10, isPredicable = 1 in def STrib_indexed : STInst<(outs), (ins IntRegs:$src1, s11_0Imm:$src2, IntRegs:$src3), "memb($src1+#$src2) = $src3", - [(truncstorei8 IntRegs:$src3, (add IntRegs:$src1, - s11_0ImmPred:$src2))]>; + [(truncstorei8 (i32 IntRegs:$src3), (add (i32 IntRegs:$src1), + s11_0ImmPred:$src2))]>; // memb(gp+#u16:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_GP : STInst<(outs), +let neverHasSideEffects = 1 in +def STrib_GP : STInst2<(outs), (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), "memb(#$global+$offset) = $src", - []>; + []>, + Requires<[NoV4T]>; -let mayStore = 1, neverHasSideEffects = 1 in -def STb_GP : STInst<(outs), +// memb(#global)=Rt +let neverHasSideEffects = 1 in +def STb_GP : STInst2<(outs), (ins globaladdress:$global, IntRegs:$src), "memb(#$global) = $src", - []>; + []>, + Requires<[NoV4T]>; // memb(Rx++#s4:0)=Rt let hasCtrlDep = 1, isPredicable = 1 in @@ -1843,51 +1911,51 @@ def POST_STbri : STInstPI<(outs IntRegs:$dst), (ins IntRegs:$src1, s4Imm:$offset), "memb($src2++#$offset) = $src1", [(set IntRegs:$dst, - (post_truncsti8 IntRegs:$src1, IntRegs:$src2, + (post_truncsti8 (i32 IntRegs:$src1), (i32 IntRegs:$src2), s4_0ImmPred:$offset))], "$src2 = $dst">; // Store byte conditionally. 
// if ([!]Pv) memb(Rs+#u6:0)=Rt // if (Pv) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_cPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STrib_cPt : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memb($addr) = $src2", []>; // if (!Pv) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_cNotPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STrib_cNotPt : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memb($addr) = $src2", []>; // if (Pv) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_indexed_cPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STrib_indexed_cPt : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if ($src1) memb($src2+#$src3) = $src4", []>; // if (!Pv) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_indexed_cNotPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STrib_indexed_cNotPt : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if (!$src1) memb($src2+#$src3) = $src4", []>; // if ([!]Pv) memb(Rx++#s4:0)=Rt // if (Pv) memb(Rx++#s4:0)=Rt -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in -def POST_STbri_cPt : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, isPredicated = 1 in +def POST_STbri_cPt : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset), "if ($src1) memb($src3++#$offset) = $src2", [],"$src3 = $dst">; // if (!Pv) memb(Rx++#s4:0)=Rt -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in -def POST_STbri_cNotPt : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, isPredicated = 1 in +def POST_STbri_cNotPt : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset), "if (!$src1) memb($src3++#$offset) = $src2", 
[],"$src3 = $dst">; @@ -1899,27 +1967,29 @@ let isPredicable = 1 in def STrih : STInst<(outs), (ins MEMri:$addr, IntRegs:$src1), "memh($addr) = $src1", - [(truncstorei16 IntRegs:$src1, ADDRriS11_1:$addr)]>; + [(truncstorei16 (i32 IntRegs:$src1), ADDRriS11_1:$addr)]>; let AddedComplexity = 10, isPredicable = 1 in def STrih_indexed : STInst<(outs), (ins IntRegs:$src1, s11_1Imm:$src2, IntRegs:$src3), "memh($src1+#$src2) = $src3", - [(truncstorei16 IntRegs:$src3, (add IntRegs:$src1, - s11_1ImmPred:$src2))]>; + [(truncstorei16 (i32 IntRegs:$src3), (add (i32 IntRegs:$src1), + s11_1ImmPred:$src2))]>; -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_GP : STInst<(outs), +let neverHasSideEffects = 1 in +def STrih_GP : STInst2<(outs), (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), "memh(#$global+$offset) = $src", - []>; + []>, + Requires<[NoV4T]>; -let mayStore = 1, neverHasSideEffects = 1 in -def STh_GP : STInst<(outs), +let neverHasSideEffects = 1 in +def STh_GP : STInst2<(outs), (ins globaladdress:$global, IntRegs:$src), "memh(#$global) = $src", - []>; + []>, + Requires<[NoV4T]>; // memh(Rx++#s4:1)=Rt.H // memh(Rx++#s4:1)=Rt @@ -1928,51 +1998,51 @@ def POST_SThri : STInstPI<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s4Imm:$offset), "memh($src2++#$offset) = $src1", [(set IntRegs:$dst, - (post_truncsti16 IntRegs:$src1, IntRegs:$src2, + (post_truncsti16 (i32 IntRegs:$src1), (i32 IntRegs:$src2), s4_1ImmPred:$offset))], "$src2 = $dst">; // Store halfword conditionally. 
// if ([!]Pv) memh(Rs+#u6:1)=Rt // if (Pv) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_cPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STrih_cPt : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memh($addr) = $src2", []>; // if (!Pv) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_cNotPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STrih_cNotPt : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memh($addr) = $src2", []>; // if (Pv) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_indexed_cPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STrih_indexed_cPt : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if ($src1) memh($src2+#$src3) = $src4", []>; // if (!Pv) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_indexed_cNotPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STrih_indexed_cNotPt : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if (!$src1) memh($src2+#$src3) = $src4", []>; // if ([!]Pv) memh(Rx++#s4:1)=Rt // if (Pv) memh(Rx++#s4:1)=Rt -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in -def POST_SThri_cPt : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, isPredicated = 1 in +def POST_SThri_cPt : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset), "if ($src1) memh($src3++#$offset) = $src2", [],"$src3 = $dst">; // if (!Pv) memh(Rx++#s4:1)=Rt -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in -def POST_SThri_cNotPt : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, isPredicated = 1 in +def POST_SThri_cNotPt : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset), "if (!$src1) memh($src3++#$offset) = $src2", 
[],"$src3 = $dst">; @@ -1980,8 +2050,8 @@ def POST_SThri_cNotPt : STInstPI<(outs IntRegs:$dst), // Store word. // Store predicate. -let Defs = [R10,R11] in -def STriw_pred : STInst<(outs), +let Defs = [R10,R11,D5], neverHasSideEffects = 1 in +def STriw_pred : STInst2<(outs), (ins MEMri:$addr, PredRegs:$src1), "Error; should not emit", []>; @@ -1991,69 +2061,79 @@ let isPredicable = 1 in def STriw : STInst<(outs), (ins MEMri:$addr, IntRegs:$src1), "memw($addr) = $src1", - [(store IntRegs:$src1, ADDRriS11_2:$addr)]>; + [(store (i32 IntRegs:$src1), ADDRriS11_2:$addr)]>; let AddedComplexity = 10, isPredicable = 1 in def STriw_indexed : STInst<(outs), (ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3), "memw($src1+#$src2) = $src3", - [(store IntRegs:$src3, (add IntRegs:$src1, s11_2ImmPred:$src2))]>; + [(store (i32 IntRegs:$src3), + (add (i32 IntRegs:$src1), s11_2ImmPred:$src2))]>; -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_GP : STInst<(outs), +let neverHasSideEffects = 1 in +def STriw_GP : STInst2<(outs), (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), "memw(#$global+$offset) = $src", - []>; + []>, + Requires<[NoV4T]>; + +let neverHasSideEffects = 1 in +def STw_GP : STInst2<(outs), + (ins globaladdress:$global, IntRegs:$src), + "memw(#$global) = $src", + []>, + Requires<[NoV4T]>; let hasCtrlDep = 1, isPredicable = 1 in def POST_STwri : STInstPI<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s4Imm:$offset), "memw($src2++#$offset) = $src1", [(set IntRegs:$dst, - (post_store IntRegs:$src1, IntRegs:$src2, s4_2ImmPred:$offset))], + (post_store (i32 IntRegs:$src1), (i32 IntRegs:$src2), + s4_2ImmPred:$offset))], "$src2 = $dst">; // Store word conditionally. 
// if ([!]Pv) memw(Rs+#u6:2)=Rt // if (Pv) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_cPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STriw_cPt : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memw($addr) = $src2", []>; // if (!Pv) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_cNotPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STriw_cNotPt : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memw($addr) = $src2", []>; // if (Pv) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_indexed_cPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STriw_indexed_cPt : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if ($src1) memw($src2+#$src3) = $src4", []>; // if (!Pv) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_indexed_cNotPt : STInst<(outs), +let neverHasSideEffects = 1, isPredicated = 1 in +def STriw_indexed_cNotPt : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if (!$src1) memw($src2+#$src3) = $src4", []>; // if ([!]Pv) memw(Rx++#s4:2)=Rt // if (Pv) memw(Rx++#s4:2)=Rt -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in -def POST_STwri_cPt : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, isPredicated = 1 in +def POST_STwri_cPt : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset), "if ($src1) memw($src3++#$offset) = $src2", [],"$src3 = $dst">; // if (!Pv) memw(Rx++#s4:2)=Rt -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in -def POST_STwri_cNotPt : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, isPredicated = 1 in +def POST_STwri_cNotPt : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset), "if (!$src1) memw($src3++#$offset) = $src2", 
[],"$src3 = $dst">; @@ -2062,7 +2142,7 @@ def POST_STwri_cNotPt : STInstPI<(outs IntRegs:$dst), // Allocate stack frame. let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in { - def ALLOCFRAME : STInst<(outs), + def ALLOCFRAME : STInst2<(outs), (ins i32imm:$amt), "allocframe(#$amt)", []>; @@ -2077,13 +2157,13 @@ let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in { // Logical NOT. def NOT_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), "$dst = not($src1)", - [(set DoubleRegs:$dst, (not DoubleRegs:$src1))]>; + [(set (i64 DoubleRegs:$dst), (not (i64 DoubleRegs:$src1)))]>; // Sign extend word to doubleword. def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1), "$dst = sxtw($src1)", - [(set DoubleRegs:$dst, (sext IntRegs:$src1))]>; + [(set (i64 DoubleRegs:$dst), (sext (i32 IntRegs:$src1)))]>; //===----------------------------------------------------------------------===// // STYPE/ALU - //===----------------------------------------------------------------------===// @@ -2091,37 +2171,58 @@ def SXTW : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src1), //===----------------------------------------------------------------------===// // STYPE/BIT + //===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// STYPE/BIT - -//===----------------------------------------------------------------------===// +// clrbit. 
+def CLRBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = clrbit($src1, #$src2)", + [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), + (not + (shl 1, u5ImmPred:$src2))))]>; +def CLRBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = clrbit($src1, #$src2)", + []>; -//===----------------------------------------------------------------------===// -// STYPE/COMPLEX + -//===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// STYPE/COMPLEX - -//===----------------------------------------------------------------------===// +// Map from r0 = and(r1, 2147483647) to r0 = clrbit(r1, #31). +def : Pat <(and (i32 IntRegs:$src1), 2147483647), + (CLRBIT_31 (i32 IntRegs:$src1), 31)>; -//===----------------------------------------------------------------------===// -// STYPE/PERM + -//===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// STYPE/PERM - -//===----------------------------------------------------------------------===// +// setbit. +def SETBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = setbit($src1, #$src2)", + [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1), + (shl 1, u5ImmPred:$src2)))]>; + +// Map from r0 = or(r1, -2147483648) to r0 = setbit(r1, #31). +def SETBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = setbit($src1, #$src2)", + []>; + +def : Pat <(or (i32 IntRegs:$src1), -2147483648), + (SETBIT_31 (i32 IntRegs:$src1), 31)>; + +// togglebit. +def TOGBIT : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = setbit($src1, #$src2)", + [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1), + (shl 1, u5ImmPred:$src2)))]>; + +// Map from r0 = xor(r1, -2147483648) to r0 = togglebit(r1, #31). 
+def TOGBIT_31 : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + "$dst = togglebit($src1, #$src2)", + []>; + +def : Pat <(xor (i32 IntRegs:$src1), -2147483648), + (TOGBIT_31 (i32 IntRegs:$src1), 31)>; -//===----------------------------------------------------------------------===// -// STYPE/PRED + -//===----------------------------------------------------------------------===// // Predicate transfer. let neverHasSideEffects = 1 in def TFR_RsPd : SInst<(outs IntRegs:$dst), (ins PredRegs:$src1), - "$dst = $src1 // Should almost never emit this", + "$dst = $src1 /* Should almost never emit this. */", []>; def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1), - "$dst = $src1 // Should almost never emit!", - [(set PredRegs:$dst, (trunc IntRegs:$src1))]>; + "$dst = $src1 /* Should almost never emit this. */", + [(set (i1 PredRegs:$dst), (trunc (i32 IntRegs:$src1)))]>; //===----------------------------------------------------------------------===// // STYPE/PRED - //===----------------------------------------------------------------------===// @@ -2132,75 +2233,85 @@ def TFR_PdRs : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1), // Shift by immediate. 
def ASR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), "$dst = asr($src1, #$src2)", - [(set IntRegs:$dst, (sra IntRegs:$src1, u5ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1), + u5ImmPred:$src2))]>; def ASRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2), "$dst = asr($src1, #$src2)", - [(set DoubleRegs:$dst, (sra DoubleRegs:$src1, u6ImmPred:$src2))]>; + [(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1), + u6ImmPred:$src2))]>; def ASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), "$dst = asl($src1, #$src2)", - [(set IntRegs:$dst, (shl IntRegs:$src1, u5ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1), + u5ImmPred:$src2))]>; + +def ASLd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2), + "$dst = asl($src1, #$src2)", + [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1), + u6ImmPred:$src2))]>; def LSR_ri : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), "$dst = lsr($src1, #$src2)", - [(set IntRegs:$dst, (srl IntRegs:$src1, u5ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1), + u5ImmPred:$src2))]>; def LSRd_ri : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2), "$dst = lsr($src1, #$src2)", - [(set DoubleRegs:$dst, (srl DoubleRegs:$src1, u6ImmPred:$src2))]>; - -def LSRd_ri_acc : SInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, - DoubleRegs:$src2, - u6Imm:$src3), - "$dst += lsr($src2, #$src3)", - [(set DoubleRegs:$dst, (add DoubleRegs:$src1, - (srl DoubleRegs:$src2, - u6ImmPred:$src3)))], - "$src1 = $dst">; - -// Shift by immediate and accumulate. -def ASR_rr_acc : SInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, - IntRegs:$src2, - IntRegs:$src3), - "$dst += asr($src2, $src3)", - [], "$src1 = $dst">; + [(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1), + u6ImmPred:$src2))]>; // Shift by immediate and add. 
+let AddedComplexity = 100 in def ADDASL : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u3Imm:$src3), "$dst = addasl($src1, $src2, #$src3)", - [(set IntRegs:$dst, (add IntRegs:$src1, - (shl IntRegs:$src2, - u3ImmPred:$src3)))]>; + [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u3ImmPred:$src3)))]>; // Shift by register. def ASL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = asl($src1, $src2)", - [(set IntRegs:$dst, (shl IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; def ASR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = asr($src1, $src2)", - [(set IntRegs:$dst, (sra IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (sra (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; +def LSL_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), + "$dst = lsl($src1, $src2)", + [(set (i32 IntRegs:$dst), (shl (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; def LSR_rr : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = lsr($src1, $src2)", - [(set IntRegs:$dst, (srl IntRegs:$src1, IntRegs:$src2))]>; + [(set (i32 IntRegs:$dst), (srl (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; + +def ASLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), + "$dst = asl($src1, $src2)", + [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1), + (i32 IntRegs:$src2)))]>; def LSLd : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), "$dst = lsl($src1, $src2)", - [(set DoubleRegs:$dst, (shl DoubleRegs:$src1, IntRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (shl (i64 DoubleRegs:$src1), + (i32 IntRegs:$src2)))]>; def ASRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), "$dst = asr($src1, $src2)", - [(set DoubleRegs:$dst, (sra DoubleRegs:$src1, IntRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (sra (i64 DoubleRegs:$src1), 
+ (i32 IntRegs:$src2)))]>; def LSRd_rr : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), "$dst = lsr($src1, $src2)", - [(set DoubleRegs:$dst, (srl DoubleRegs:$src1, IntRegs:$src2))]>; + [(set (i64 DoubleRegs:$dst), (srl (i64 DoubleRegs:$src1), + (i32 IntRegs:$src2)))]>; //===----------------------------------------------------------------------===// // STYPE/SHIFT - @@ -2231,8 +2342,8 @@ def SDHexagonBARRIER: SDTypeProfile<0, 0, []>; def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDHexagonBARRIER, [SDNPHasChain]>; -let hasSideEffects = 1 in -def BARRIER : STInst<(outs), (ins), +let hasSideEffects = 1, isHexagonSolo = 1 in +def BARRIER : SYSInst<(outs), (ins), "barrier", [(HexagonBARRIER)]>; @@ -2244,47 +2355,50 @@ def BARRIER : STInst<(outs), (ins), let isReMaterializable = 1 in def TFRI64 : ALU64_rr<(outs DoubleRegs:$dst), (ins s8Imm64:$src1), "$dst = #$src1", - [(set DoubleRegs:$dst, s8Imm64Pred:$src1)]>; + [(set (i64 DoubleRegs:$dst), s8Imm64Pred:$src1)]>; // Pseudo instruction to encode a set of conditional transfers. // This instruction is used instead of a mux and trades-off codesize // for performance. We conduct this transformation optimistically in // the hope that these instructions get promoted to dot-new transfers. 
-let AddedComplexity = 100 in +let AddedComplexity = 100, isPredicated = 1 in def TFR_condset_rr : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "Error; should not emit", - [(set IntRegs:$dst, (select PredRegs:$src1, IntRegs:$src2, - IntRegs:$src3))]>; - -let AddedComplexity = 100 in + [(set (i32 IntRegs:$dst), + (i32 (select (i1 PredRegs:$src1), + (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))]>; +let AddedComplexity = 100, isPredicated = 1 in def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, s12Imm:$src3), "Error; should not emit", - [(set IntRegs:$dst, - (select PredRegs:$src1, IntRegs:$src2, s12ImmPred:$src3))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2), + s12ImmPred:$src3)))]>; -let AddedComplexity = 100 in +let AddedComplexity = 100, isPredicated = 1 in def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2, IntRegs:$src3), "Error; should not emit", - [(set IntRegs:$dst, - (select PredRegs:$src1, s12ImmPred:$src2, IntRegs:$src3))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2, + (i32 IntRegs:$src3))))]>; -let AddedComplexity = 100 in +let AddedComplexity = 100, isPredicated = 1 in def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, s12Imm:$src2, s12Imm:$src3), "Error; should not emit", - [(set IntRegs:$dst, (select PredRegs:$src1, - s12ImmPred:$src2, - s12ImmPred:$src3))]>; + [(set (i32 IntRegs:$dst), + (i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2, + s12ImmPred:$src3)))]>; // Generate frameindex addresses. let isReMaterializable = 1 in def TFR_FI : ALU32_ri<(outs IntRegs:$dst), (ins FrameIndex:$src1), "$dst = add($src1)", - [(set IntRegs:$dst, ADDRri:$src1)]>; + [(set (i32 IntRegs:$dst), ADDRri:$src1)]>; // // CR - Type. 
@@ -2303,69 +2417,116 @@ def LOOP0_r : CRInst<(outs), (ins brtarget:$offset, IntRegs:$src2), let isBranch = 1, isTerminator = 1, neverHasSideEffects = 1, Defs = [PC, LC0], Uses = [SA0, LC0] in { -def ENDLOOP0 : CRInst<(outs), (ins brtarget:$offset), +def ENDLOOP0 : Marker<(outs), (ins brtarget:$offset), ":endloop0", []>; } // Support for generating global address. // Taken from X86InstrInfo.td. -def SDTHexagonCONST32 : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, - SDTCisPtrTy<0>]>; +def SDTHexagonCONST32 : SDTypeProfile<1, 1, [ + SDTCisVT<0, i32>, + SDTCisVT<1, i32>, + SDTCisPtrTy<0>]>; def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>; def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>; +// HI/LO Instructions +let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in +def LO : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst.l = #LO($global)", + []>; + +let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in +def HI : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst.h = #HI($global)", + []>; + +let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in +def LOi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value), + "$dst.l = #LO($imm_value)", + []>; + + +let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in +def HIi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value), + "$dst.h = #HI($imm_value)", + []>; + +let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in +def LO_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt), + "$dst.l = #LO($jt)", + []>; + +let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in +def HI_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt), + "$dst.h = #HI($jt)", + []>; + + +let isReMaterializable = 1, isMoveImm = 1, neverHasSideEffects = 1 in +def LO_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label), + "$dst.l = #LO($label)", + []>; + +let 
isReMaterializable = 1, isMoveImm = 1 , neverHasSideEffects = 1 in +def HI_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label), + "$dst.h = #HI($label)", + []>; + // This pattern is incorrect. When we add small data, we should change // this pattern to use memw(#foo). +// This is for sdata. let isMoveImm = 1 in def CONST32 : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), "$dst = CONST32(#$global)", - [(set IntRegs:$dst, - (load (HexagonCONST32 tglobaltlsaddr:$global)))]>; + [(set (i32 IntRegs:$dst), + (load (HexagonCONST32 tglobaltlsaddr:$global)))]>; +// This is for non-sdata. let isReMaterializable = 1, isMoveImm = 1 in -def CONST32_set : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), +def CONST32_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), "$dst = CONST32(#$global)", - [(set IntRegs:$dst, - (HexagonCONST32 tglobaladdr:$global))]>; + [(set (i32 IntRegs:$dst), + (HexagonCONST32 tglobaladdr:$global))]>; let isReMaterializable = 1, isMoveImm = 1 in -def CONST32_set_jt : LDInst<(outs IntRegs:$dst), (ins jumptablebase:$jt), +def CONST32_set_jt : LDInst2<(outs IntRegs:$dst), (ins jumptablebase:$jt), "$dst = CONST32(#$jt)", - [(set IntRegs:$dst, - (HexagonCONST32 tjumptable:$jt))]>; + [(set (i32 IntRegs:$dst), + (HexagonCONST32 tjumptable:$jt))]>; let isReMaterializable = 1, isMoveImm = 1 in -def CONST32GP_set : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), +def CONST32GP_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), "$dst = CONST32(#$global)", - [(set IntRegs:$dst, - (HexagonCONST32_GP tglobaladdr:$global))]>; + [(set (i32 IntRegs:$dst), + (HexagonCONST32_GP tglobaladdr:$global))]>; let isReMaterializable = 1, isMoveImm = 1 in -def CONST32_Int_Real : LDInst<(outs IntRegs:$dst), (ins i32imm:$global), +def CONST32_Int_Real : LDInst2<(outs IntRegs:$dst), (ins i32imm:$global), "$dst = CONST32(#$global)", - [(set IntRegs:$dst, imm:$global) ]>; + [(set (i32 IntRegs:$dst), imm:$global) ]>; let 
isReMaterializable = 1, isMoveImm = 1 in -def CONST32_Label : LDInst<(outs IntRegs:$dst), (ins bblabel:$label), +def CONST32_Label : LDInst2<(outs IntRegs:$dst), (ins bblabel:$label), "$dst = CONST32($label)", - [(set IntRegs:$dst, (HexagonCONST32 bbl:$label))]>; + [(set (i32 IntRegs:$dst), (HexagonCONST32 bbl:$label))]>; let isReMaterializable = 1, isMoveImm = 1 in -def CONST64_Int_Real : LDInst<(outs DoubleRegs:$dst), (ins i64imm:$global), +def CONST64_Int_Real : LDInst2<(outs DoubleRegs:$dst), (ins i64imm:$global), "$dst = CONST64(#$global)", - [(set DoubleRegs:$dst, imm:$global) ]>; + [(set (i64 DoubleRegs:$dst), imm:$global) ]>; def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins), "$dst = xor($dst, $dst)", - [(set PredRegs:$dst, 0)]>; + [(set (i1 PredRegs:$dst), 0)]>; def MPY_trsext : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), - "$dst = mpy($src1, $src2)", - [(set IntRegs:$dst, - (trunc (i64 (srl (i64 (mul (i64 (sext IntRegs:$src1)), - (i64 (sext IntRegs:$src2)))), - (i32 32)))))]>; + "$dst = mpy($src1, $src2)", + [(set (i32 IntRegs:$dst), + (trunc (i64 (srl (i64 (mul (i64 (sext (i32 IntRegs:$src1))), + (i64 (sext (i32 IntRegs:$src2))))), + (i32 32)))))]>; // Pseudo instructions. 
def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>; @@ -2405,7 +2566,7 @@ let Defs = [R29, R30, R31], Uses = [R29] in { let isCall = 1, neverHasSideEffects = 1, Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in { - def CALL : JInst<(outs), (ins calltarget:$dst, variable_ops), + def CALL : JInst<(outs), (ins calltarget:$dst), "call $dst", []>; } @@ -2413,34 +2574,28 @@ let isCall = 1, neverHasSideEffects = 1, let isCall = 1, neverHasSideEffects = 1, Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in { - def CALLR : JRInst<(outs), (ins IntRegs:$dst, variable_ops), + def CALLR : JRInst<(outs), (ins IntRegs:$dst), "callr $dst", []>; } // Tail Calls. -let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1, - Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, - R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in { - def TCRETURNtg : JInst<(outs), (ins calltarget:$dst, variable_ops), +let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in { + def TCRETURNtg : JInst<(outs), (ins calltarget:$dst), "jump $dst // TAILCALL", []>; } -let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1, - Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, - R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in { - def TCRETURNtext : JInst<(outs), (ins calltarget:$dst, variable_ops), +let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in { + def TCRETURNtext : JInst<(outs), (ins calltarget:$dst), "jump $dst // TAILCALL", []>; } -let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1, - Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, - R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in { - def TCRETURNR : JInst<(outs), (ins IntRegs:$dst, variable_ops), +let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1 in { + def TCRETURNR : JInst<(outs), (ins IntRegs:$dst), "jumpr $dst // 
TAILCALL", []>; } // Map call instruction. -def : Pat<(call IntRegs:$dst), - (CALLR IntRegs:$dst)>, Requires<[HasV2TOnly]>; +def : Pat<(call (i32 IntRegs:$dst)), + (CALLR (i32 IntRegs:$dst))>, Requires<[HasV2TOnly]>; def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>, Requires<[HasV2TOnly]>; def : Pat<(call texternalsym:$dst), @@ -2450,309 +2605,516 @@ def : Pat<(HexagonTCRet tglobaladdr:$dst), (TCRETURNtg tglobaladdr:$dst)>; def : Pat<(HexagonTCRet texternalsym:$dst), (TCRETURNtext texternalsym:$dst)>; -def : Pat<(HexagonTCRet IntRegs:$dst), - (TCRETURNR IntRegs:$dst)>; +def : Pat<(HexagonTCRet (i32 IntRegs:$dst)), + (TCRETURNR (i32 IntRegs:$dst))>; + +// Atomic load and store support +// 8 bit atomic load +def : Pat<(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDub_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_8 ADDRriS11_0:$src1), + (i32 (LDriub ADDRriS11_0:$src1))>; + +def : Pat<(atomic_load_8 (add (i32 IntRegs:$src1), s11_0ImmPred:$offset)), + (i32 (LDriub_indexed (i32 IntRegs:$src1), s11_0ImmPred:$offset))>; + + + +// 16 bit atomic load +def : Pat<(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDuh_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_16 ADDRriS11_1:$src1), + (i32 (LDriuh ADDRriS11_1:$src1))>; + +def : Pat<(atomic_load_16 (add (i32 IntRegs:$src1), s11_1ImmPred:$offset)), + (i32 (LDriuh_indexed (i32 IntRegs:$src1), s11_1ImmPred:$offset))>; + + + +// 32 bit atomic load +def : Pat<(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDw_GP tglobaladdr:$global))>, + 
Requires<[NoV4T]>; + +def : Pat<(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_32 ADDRriS11_2:$src1), + (i32 (LDriw ADDRriS11_2:$src1))>; + +def : Pat<(atomic_load_32 (add (i32 IntRegs:$src1), s11_2ImmPred:$offset)), + (i32 (LDriw_indexed (i32 IntRegs:$src1), s11_2ImmPred:$offset))>; + + +// 64 bit atomic load +def : Pat<(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)), + (i64 (LDd_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_load_64 ADDRriS11_3:$src1), + (i64 (LDrid ADDRriS11_3:$src1))>; -// Map from r0 = and(r1, 65535) to r0 = zxth(r1). -def : Pat <(and IntRegs:$src1, 65535), - (ZXTH IntRegs:$src1)>; +def : Pat<(atomic_load_64 (add (i32 IntRegs:$src1), s11_3ImmPred:$offset)), + (i64 (LDrid_indexed (i32 IntRegs:$src1), s11_3ImmPred:$offset))>; + + +// 64 bit atomic store +def : Pat<(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global), + (i64 DoubleRegs:$src1)), + (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i64 DoubleRegs:$src1)), + (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, + (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>; + +// 8 bit atomic store +def : Pat<(atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, Requires<[NoV4T]>; + +def : Pat<(atomic_store_8 
ADDRriS11_0:$src2, (i32 IntRegs:$src1)), + (STrib ADDRriS11_0:$src2, (i32 IntRegs:$src1))>; + +def : Pat<(atomic_store_8 (add (i32 IntRegs:$src2), s11_0ImmPred:$offset), + (i32 IntRegs:$src1)), + (STrib_indexed (i32 IntRegs:$src2), s11_0ImmPred:$offset, + (i32 IntRegs:$src1))>; + + +// 16 bit atomic store +def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, Requires<[NoV4T]>; + +def : Pat<(atomic_store_16 ADDRriS11_1:$src2, (i32 IntRegs:$src1)), + (STrih ADDRriS11_1:$src2, (i32 IntRegs:$src1))>; + +def : Pat<(atomic_store_16 (i32 IntRegs:$src1), + (add (i32 IntRegs:$src2), s11_1ImmPred:$offset)), + (STrih_indexed (i32 IntRegs:$src2), s11_1ImmPred:$offset, + (i32 IntRegs:$src1))>; + + +// 32 bit atomic store +def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STw_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; + +def : Pat<(atomic_store_32 ADDRriS11_2:$src2, (i32 IntRegs:$src1)), + (STriw ADDRriS11_2:$src2, (i32 IntRegs:$src1))>; + +def : Pat<(atomic_store_32 (add (i32 IntRegs:$src2), s11_2ImmPred:$offset), + (i32 IntRegs:$src1)), + (STriw_indexed (i32 IntRegs:$src2), s11_2ImmPred:$offset, + (i32 IntRegs:$src1))>; + + + + +def : Pat<(atomic_store_64 ADDRriS11_3:$src2, (i64 DoubleRegs:$src1)), + (STrid ADDRriS11_3:$src2, (i64 DoubleRegs:$src1))>; + +def : Pat<(atomic_store_64 (add (i32 IntRegs:$src2), s11_3ImmPred:$offset), + (i64 DoubleRegs:$src1)), + (STrid_indexed (i32 
IntRegs:$src2), s11_3ImmPred:$offset, + (i64 DoubleRegs:$src1))>; + +// Map from r0 = and(r1, 65535) to r0 = zxth(r1) +def : Pat <(and (i32 IntRegs:$src1), 65535), + (ZXTH (i32 IntRegs:$src1))>; // Map from r0 = and(r1, 255) to r0 = zxtb(r1). -def : Pat <(and IntRegs:$src1, 255), - (ZXTB IntRegs:$src1)>; +def : Pat <(and (i32 IntRegs:$src1), 255), + (ZXTB (i32 IntRegs:$src1))>; // Map Add(p1, true) to p1 = not(p1). // Add(p1, false) should never be produced, // if it does, it got to be mapped to NOOP. -def : Pat <(add PredRegs:$src1, -1), - (NOT_p PredRegs:$src1)>; +def : Pat <(add (i1 PredRegs:$src1), -1), + (NOT_p (i1 PredRegs:$src1))>; // Map from p0 = setlt(r0, r1) r2 = mux(p0, r3, r4) => // p0 = cmp.lt(r0, r1), r0 = mux(p0, r2, r1). -def : Pat <(select (i1 (setlt IntRegs:$src1, IntRegs:$src2)), IntRegs:$src3, - IntRegs:$src4), - (TFR_condset_rr (CMPLTrr IntRegs:$src1, IntRegs:$src2), IntRegs:$src4, - IntRegs:$src3)>, Requires<[HasV2TOnly]>; +def : Pat <(select (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i32 IntRegs:$src3), + (i32 IntRegs:$src4)), + (i32 (TFR_condset_rr (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + (i32 IntRegs:$src4), (i32 IntRegs:$src3)))>, + Requires<[HasV2TOnly]>; // Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i). 
-def : Pat <(select (not PredRegs:$src1), s8ImmPred:$src2, s8ImmPred:$src3), - (TFR_condset_ii PredRegs:$src1, s8ImmPred:$src3, s8ImmPred:$src2)>; +def : Pat <(select (not (i1 PredRegs:$src1)), s8ImmPred:$src2, s8ImmPred:$src3), + (i32 (TFR_condset_ii (i1 PredRegs:$src1), s8ImmPred:$src3, + s8ImmPred:$src2))>; + +// Map from p0 = pnot(p0); r0 = select(p0, #i, r1) +// => r0 = TFR_condset_ri(p0, r1, #i) +def : Pat <(select (not (i1 PredRegs:$src1)), s12ImmPred:$src2, + (i32 IntRegs:$src3)), + (i32 (TFR_condset_ri (i1 PredRegs:$src1), (i32 IntRegs:$src3), + s12ImmPred:$src2))>; + +// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i) +// => r0 = TFR_condset_ir(p0, #i, r1) +def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, s12ImmPred:$src3), + (i32 (TFR_condset_ir (i1 PredRegs:$src1), s12ImmPred:$src3, + (i32 IntRegs:$src2)))>; // Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump. def : Pat <(brcond (not PredRegs:$src1), bb:$offset), - (JMP_cNot PredRegs:$src1, bb:$offset)>; + (JMP_cNot (i1 PredRegs:$src1), bb:$offset)>; // Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2). def : Pat <(and PredRegs:$src1, (not PredRegs:$src2)), - (AND_pnotp PredRegs:$src1, PredRegs:$src2)>; + (i1 (AND_pnotp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>; // Map from store(globaladdress + x) -> memd(#foo + x). let AddedComplexity = 100 in -def : Pat <(store DoubleRegs:$src1, +def : Pat <(store (i64 DoubleRegs:$src1), (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset)), - (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, DoubleRegs:$src1)>; + (STrid_GP tglobaladdr:$global, u16ImmPred:$offset, + (i64 DoubleRegs:$src1))>, Requires<[NoV4T]>; -// Map from store(globaladdress) -> memd(#foo + 0). +// Map from store(globaladdress) -> memd(#foo). 
let AddedComplexity = 100 in -def : Pat <(store DoubleRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)), - (STrid_GP tglobaladdr:$global, 0, DoubleRegs:$src1)>; +def : Pat <(store (i64 DoubleRegs:$src1), + (HexagonCONST32_GP tglobaladdr:$global)), + (STd_GP tglobaladdr:$global, (i64 DoubleRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress + x) -> memw(#foo + x). let AddedComplexity = 100 in -def : Pat <(store IntRegs:$src1, (add (HexagonCONST32_GP tglobaladdr:$global), +def : Pat <(store (i32 IntRegs:$src1), + (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset)), - (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>; + (STriw_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress) -> memw(#foo + 0). let AddedComplexity = 100 in -def : Pat <(store IntRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)), - (STriw_GP tglobaladdr:$global, 0, IntRegs:$src1)>; +def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), + (STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>; -// Map from store(globaladdress) -> memw(#foo + 0). +// Map from store(globaladdress) -> memw(#foo). let AddedComplexity = 100 in -def : Pat <(store IntRegs:$src1, (HexagonCONST32_GP tglobaladdr:$global)), - (STriw_GP tglobaladdr:$global, 0, IntRegs:$src1)>; +def : Pat <(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), + (STriw_GP tglobaladdr:$global, 0, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress + x) -> memh(#foo + x). 
let AddedComplexity = 100 in -def : Pat <(truncstorei16 IntRegs:$src1, +def : Pat <(truncstorei16 (i32 IntRegs:$src1), (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset)), - (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>; + (STrih_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress) -> memh(#foo). let AddedComplexity = 100 in -def : Pat <(truncstorei16 IntRegs:$src1, +def : Pat <(truncstorei16 (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), - (STh_GP tglobaladdr:$global, IntRegs:$src1)>; + (STh_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress + x) -> memb(#foo + x). let AddedComplexity = 100 in -def : Pat <(truncstorei8 IntRegs:$src1, +def : Pat <(truncstorei8 (i32 IntRegs:$src1), (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset)), - (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, IntRegs:$src1)>; + (STrib_GP tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from store(globaladdress) -> memb(#foo). let AddedComplexity = 100 in -def : Pat <(truncstorei8 IntRegs:$src1, +def : Pat <(truncstorei8 (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), - (STb_GP tglobaladdr:$global, IntRegs:$src1)>; + (STb_GP tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memw(#foo + x). let AddedComplexity = 100 in -def : Pat <(load (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriw_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memw(#foo + 0). +// Map from load(globaladdress) -> memw(#foo). 
let AddedComplexity = 100 in -def : Pat <(load (HexagonCONST32_GP tglobaladdr:$global)), - (LDw_GP tglobaladdr:$global)>; +def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDw_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memd(#foo + x). let AddedComplexity = 100 in def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global), u16ImmPred:$offset))), - (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset)>; + (i64 (LDrid_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; // Map from load(globaladdress) -> memw(#foo + 0). let AddedComplexity = 100 in def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))), - (LDd_GP tglobaladdr:$global)>; - + (i64 (LDd_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress + 0), Pd = Rd. +// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd. let AddedComplexity = 100 in def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))), - (TFR_PdRs (LDrib_GP tglobaladdr:$global, 0))>; + (i1 (TFR_PdRs (i32 (LDb_GP tglobaladdr:$global))))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memh(#foo + x). let AddedComplexity = 100 in -def : Pat <(sextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrih_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memh(#foo + 0). +// Map from load(globaladdress + x) -> memh(#foo + x). 
let AddedComplexity = 100 in -def : Pat <(sextloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDrih_GP tglobaladdr:$global, 0)>; +def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDrih_GP tglobaladdr:$global, 0))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memuh(#foo + x). let AddedComplexity = 100 in -def : Pat <(zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memuh(#foo + 0). +// Map from load(globaladdress) -> memuh(#foo). let AddedComplexity = 100 in -def : Pat <(zextloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDriuh_GP tglobaladdr:$global, 0)>; +def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDriuh_GP tglobaladdr:$global, 0))>, + Requires<[NoV4T]>; -// Map from load(globaladdress + x) -> memuh(#foo + x). +// Map from load(globaladdress) -> memh(#foo). let AddedComplexity = 100 in -def : Pat <(extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDriuh_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDh_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memuh(#foo + 0). -let AddedComplexity = 100 in -def : Pat <(extloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDriuh_GP tglobaladdr:$global, 0)>; -// Map from load(globaladdress + x) -> memub(#foo + x). +// Map from load(globaladdress) -> memuh(#foo). 
let AddedComplexity = 100 in -def : Pat <(zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDuh_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memuh(#foo + 0). +// Map from load(globaladdress + x) -> memb(#foo + x). let AddedComplexity = 100 in -def : Pat <(zextloadi8 (HexagonCONST32_GP tglobaladdr:$global)), - (LDriub_GP tglobaladdr:$global, 0)>; +def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; // Map from load(globaladdress + x) -> memb(#foo + x). let AddedComplexity = 100 in -def : Pat <(sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset)>; +def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrib_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; + +// Map from load(globaladdress + x) -> memub(#foo + x). +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriub_GP tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[NoV4T]>; // Map from load(globaladdress) -> memb(#foo). let AddedComplexity = 100 in -def : Pat <(extloadi8 (HexagonCONST32_GP tglobaladdr:$global)), - (LDb_GP tglobaladdr:$global)>; +def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // Map from load(globaladdress) -> memb(#foo). 
let AddedComplexity = 100 in -def : Pat <(sextloadi8 (HexagonCONST32_GP tglobaladdr:$global)), - (LDb_GP tglobaladdr:$global)>; +def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // Map from load(globaladdress) -> memub(#foo). let AddedComplexity = 100 in -def : Pat <(zextloadi8 (HexagonCONST32_GP tglobaladdr:$global)), - (LDub_GP tglobaladdr:$global)>; +def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDub_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // When the Interprocedural Global Variable optimizer realizes that a // certain global variable takes only two constant values, it shrinks the // global to a boolean. Catch those loads here in the following 3 patterns. let AddedComplexity = 100 in -def : Pat <(extloadi1 (HexagonCONST32_GP tglobaladdr:$global)), - (LDb_GP tglobaladdr:$global)>; +def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; let AddedComplexity = 100 in -def : Pat <(sextloadi1 (HexagonCONST32_GP tglobaladdr:$global)), - (LDb_GP tglobaladdr:$global)>; - -let AddedComplexity = 100 in -def : Pat <(zextloadi1 (HexagonCONST32_GP tglobaladdr:$global)), - (LDub_GP tglobaladdr:$global)>; - -// Map from load(globaladdress) -> memh(#foo). -let AddedComplexity = 100 in -def : Pat <(extloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDh_GP tglobaladdr:$global)>; - -// Map from load(globaladdress) -> memh(#foo). -let AddedComplexity = 100 in -def : Pat <(sextloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDh_GP tglobaladdr:$global)>; +def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; -// Map from load(globaladdress) -> memuh(#foo). 
let AddedComplexity = 100 in -def : Pat <(zextloadi16 (HexagonCONST32_GP tglobaladdr:$global)), - (LDuh_GP tglobaladdr:$global)>; +def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDub_GP tglobaladdr:$global))>, + Requires<[NoV4T]>; // Map from i1 loads to 32 bits. This assumes that the i1* is byte aligned. def : Pat <(i32 (zextloadi1 ADDRriS11_0:$addr)), - (AND_rr (LDrib ADDRriS11_0:$addr), (TFRI 0x1))>; + (i32 (AND_rr (i32 (LDrib ADDRriS11_0:$addr)), (TFRI 0x1)))>; // Map from Rdd = sign_extend_inreg(Rss, i32) -> Rdd = SXTW(Rss.lo). -def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i32)), - (i64 (SXTW (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg)))>; +def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i32)), + (i64 (SXTW (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg))))>; // Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = SXTW(SXTH(Rss.lo)). -def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i16)), - (i64 (SXTW (SXTH (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>; +def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i16)), + (i64 (SXTW (i32 (SXTH (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg))))))>; // Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = SXTW(SXTB(Rss.lo)). -def : Pat <(i64 (sext_inreg DoubleRegs:$src1, i8)), - (i64 (SXTW (SXTB (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg))))>; +def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)), + (i64 (SXTW (i32 (SXTB (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg))))))>; -// We want to prevent emiting pnot's as much as possible. +// We want to prevent emitting pnot's as much as possible. // Map brcond with an unsupported setcc to a JMP_cNot. 
-def : Pat <(brcond (i1 (setne IntRegs:$src1, IntRegs:$src2)), bb:$offset), - (JMP_cNot (CMPEQrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + bb:$offset), + (JMP_cNot (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + bb:$offset)>; -def : Pat <(brcond (i1 (setne IntRegs:$src1, s10ImmPred:$src2)), bb:$offset), - (JMP_cNot (CMPEQri IntRegs:$src1, s10ImmPred:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)), + bb:$offset), + (JMP_cNot (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>; -def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 -1))), bb:$offset), - (JMP_cNot PredRegs:$src1, bb:$offset)>; +def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset), + (JMP_cNot (i1 PredRegs:$src1), bb:$offset)>; -def : Pat <(brcond (i1 (setne PredRegs:$src1, (i1 0))), bb:$offset), - (JMP_c PredRegs:$src1, bb:$offset)>; +def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset), + (JMP_c (i1 PredRegs:$src1), bb:$offset)>; -def : Pat <(brcond (i1 (setlt IntRegs:$src1, s8ImmPred:$src2)), bb:$offset), - (JMP_cNot (CMPGEri IntRegs:$src1, s8ImmPred:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), + bb:$offset), + (JMP_cNot (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2), bb:$offset)>; -def : Pat <(brcond (i1 (setlt IntRegs:$src1, IntRegs:$src2)), bb:$offset), - (JMP_c (CMPLTrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + bb:$offset), + (JMP_c (CMPLTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), bb:$offset)>; -def : Pat <(brcond (i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)), +def : Pat <(brcond (i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), bb:$offset), - (JMP_cNot (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1), + (JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)), 
bb:$offset)>; -def : Pat <(brcond (i1 (setule IntRegs:$src1, IntRegs:$src2)), bb:$offset), - (JMP_cNot (CMPGTUrr IntRegs:$src1, IntRegs:$src2), bb:$offset)>; +def : Pat <(brcond (i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + bb:$offset), + (JMP_cNot (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)), + bb:$offset)>; -def : Pat <(brcond (i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)), +def : Pat <(brcond (i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), bb:$offset), - (JMP_cNot (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2), - bb:$offset)>; + (JMP_cNot (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), + bb:$offset)>; // Map from a 64-bit select to an emulated 64-bit mux. // Hexagon does not support 64-bit MUXes; so emulate with combines. -def : Pat <(select PredRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3), - (COMBINE_rr - (MUX_rr PredRegs:$src1, - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src3, subreg_hireg)), - (MUX_rr PredRegs:$src1, - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src3, subreg_loreg)))>; +def : Pat <(select (i1 PredRegs:$src1), (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src3)), + (i64 (COMBINE_rr (i32 (MUX_rr (i1 PredRegs:$src1), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3), + subreg_hireg)))), + (i32 (MUX_rr (i1 PredRegs:$src1), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3), + subreg_loreg))))))>; // Map from a 1-bit select to logical ops. // From LegalizeDAG.cpp: (B1 ? B2 : B3) <=> (B1 & B2)|(!B1&B3). 
-def : Pat <(select PredRegs:$src1, PredRegs:$src2, PredRegs:$src3), - (OR_pp (AND_pp PredRegs:$src1, PredRegs:$src2), - (AND_pp (NOT_p PredRegs:$src1), PredRegs:$src3))>; +def : Pat <(select (i1 PredRegs:$src1), (i1 PredRegs:$src2), + (i1 PredRegs:$src3)), + (OR_pp (AND_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)), + (AND_pp (NOT_p (i1 PredRegs:$src1)), (i1 PredRegs:$src3)))>; // Map Pd = load(addr) -> Rs = load(addr); Pd = Rs. def : Pat<(i1 (load ADDRriS11_2:$addr)), (i1 (TFR_PdRs (i32 (LDrib ADDRriS11_2:$addr))))>; // Map for truncating from 64 immediates to 32 bit immediates. -def : Pat<(i32 (trunc DoubleRegs:$src)), - (i32 (EXTRACT_SUBREG DoubleRegs:$src, subreg_loreg))>; +def : Pat<(i32 (trunc (i64 DoubleRegs:$src))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg))>; // Map for truncating from i64 immediates to i1 bit immediates. -def : Pat<(i1 (trunc DoubleRegs:$src)), - (i1 (TFR_PdRs (i32(EXTRACT_SUBREG DoubleRegs:$src, subreg_loreg))))>; +def : Pat<(i1 (trunc (i64 DoubleRegs:$src))), + (i1 (TFR_PdRs (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), + subreg_loreg))))>; // Map memb(Rs) = Rdd -> memb(Rs) = Rt. -def : Pat<(truncstorei8 DoubleRegs:$src, ADDRriS11_0:$addr), - (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src, +def : Pat<(truncstorei8 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), + (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg)))>; // Map memh(Rs) = Rdd -> memh(Rs) = Rt. -def : Pat<(truncstorei16 DoubleRegs:$src, ADDRriS11_0:$addr), - (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src, +def : Pat<(truncstorei16 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), + (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), + subreg_loreg)))>; +// Map memw(Rs) = Rdd -> memw(Rs) = Rt +def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), + (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg)))>; // Map memw(Rs) = Rdd -> memw(Rs) = Rt. 
-def : Pat<(truncstorei32 DoubleRegs:$src, ADDRriS11_0:$addr), - (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG DoubleRegs:$src, +def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), + (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg)))>; // Map from i1 = constant<-1>; memw(addr) = i1 -> r0 = 1; memw(addr) = r0. @@ -2763,118 +3125,134 @@ let AddedComplexity = 100 in // Map from i1 = constant<-1>; memw(CONST32(#foo)) = i1 -> r0 = 1; // memw(#foo) = r0 def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)), - (STb_GP tglobaladdr:$global, (TFRI 1))>; - + (STb_GP tglobaladdr:$global, (TFRI 1))>, + Requires<[NoV4T]>; // Map from i1 = constant<-1>; store i1 -> r0 = 1; store r0. def : Pat<(store (i1 -1), ADDRriS11_2:$addr), (STrib ADDRriS11_2:$addr, (TFRI 1))>; // Map from memb(Rs) = Pd -> Rt = mux(Pd, #0, #1); store Rt. -def : Pat<(store PredRegs:$src1, ADDRriS11_2:$addr), - (STrib ADDRriS11_2:$addr, (i32 (MUX_ii PredRegs:$src1, 1, 0)) )>; +def : Pat<(store (i1 PredRegs:$src1), ADDRriS11_2:$addr), + (STrib ADDRriS11_2:$addr, (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0)) )>; // Map Rdd = anyext(Rs) -> Rdd = sxtw(Rs). // Hexagon_TODO: We can probably use combine but that will cost 2 instructions. // Better way to do this? -def : Pat<(i64 (anyext IntRegs:$src1)), - (i64 (SXTW IntRegs:$src1))>; +def : Pat<(i64 (anyext (i32 IntRegs:$src1))), + (i64 (SXTW (i32 IntRegs:$src1)))>; // Map cmple -> cmpgt. // rs <= rt -> !(rs > rt). -def : Pat<(i1 (setle IntRegs:$src1, s10ImmPred:$src2)), - (i1 (NOT_p (CMPGTri IntRegs:$src1, s10ImmPred:$src2)))>; +def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ImmPred:$src2)), + (i1 (NOT_p (CMPGTri (i32 IntRegs:$src1), s10ImmPred:$src2)))>; // rs <= rt -> !(rs > rt). 
-def : Pat<(i1 (setle IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p (CMPGTrr IntRegs:$src1, IntRegs:$src2)))>; +def : Pat<(i1 (setle (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (CMPGTrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>; // Rss <= Rtt -> !(Rss > Rtt). -def : Pat<(i1 (setle DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p (CMPGT64rr DoubleRegs:$src1, DoubleRegs:$src2)))>; +def : Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (CMPGT64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>; // Map cmpne -> cmpeq. // Hexagon_TODO: We should improve on this. // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne IntRegs:$src1, s10ImmPred:$src2)), - (i1 (NOT_p(i1 (CMPEQri IntRegs:$src1, s10ImmPred:$src2))))>; +def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)), + (i1 (NOT_p(i1 (CMPEQri (i32 IntRegs:$src1), s10ImmPred:$src2))))>; // Map cmpne(Rs) -> !cmpeqe(Rs). // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p(i1 (CMPEQrr IntRegs:$src1, IntRegs:$src2))))>; +def : Pat <(i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (i1 (CMPEQrr (i32 IntRegs:$src1), (i32 IntRegs:$src2)))))>; // Convert setne back to xor for hexagon since we compute w/ pred registers. -def : Pat <(i1 (setne PredRegs:$src1, PredRegs:$src2)), - (i1 (XOR_pp PredRegs:$src1, PredRegs:$src2))>; +def : Pat <(i1 (setne (i1 PredRegs:$src1), (i1 PredRegs:$src2))), + (i1 (XOR_pp (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>; // Map cmpne(Rss) -> !cmpew(Rss). // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p(i1 (CMPEHexagon4rr DoubleRegs:$src1, DoubleRegs:$src2))))>; +def : Pat <(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (i1 (CMPEHexagon4rr (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2)))))>; // Map cmpge(Rs, Rt) -> !(cmpgt(Rs, Rt). // rs >= rt -> !(rt > rs). 
-def : Pat <(i1 (setge IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p(i1 (CMPGTrr IntRegs:$src2, IntRegs:$src1))))>; +def : Pat <(i1 (setge (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>; -def : Pat <(i1 (setge IntRegs:$src1, s8ImmPred:$src2)), - (i1 (CMPGEri IntRegs:$src1, s8ImmPred:$src2))>; +def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ImmPred:$src2)), + (i1 (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2))>; // Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss). // rss >= rtt -> !(rtt > rss). -def : Pat <(i1 (setge DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p(i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))))>; +def : Pat <(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (i1 (CMPGT64rr (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src1)))))>; // Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm). // rs < rt -> !(rs >= rt). -def : Pat <(i1 (setlt IntRegs:$src1, s8ImmPred:$src2)), - (i1 (NOT_p (CMPGEri IntRegs:$src1, s8ImmPred:$src2)))>; +def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), + (i1 (NOT_p (CMPGEri (i32 IntRegs:$src1), s8ImmPred:$src2)))>; -// Map cmplt(Rs, Rt) -> cmplt(Rs, Rt). -// rs < rt -> rs < rt. Let assembler map it. -def : Pat <(i1 (setlt IntRegs:$src1, IntRegs:$src2)), - (i1 (CMPLTrr IntRegs:$src2, IntRegs:$src1))>; +// Map cmplt(Rs, Rt) -> cmpgt(Rt, Rs). +// rs < rt -> rt > rs. +// We can let assembler map it, or we can do in the compiler itself. +def : Pat <(i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (CMPGTrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>; // Map cmplt(Rss, Rtt) -> cmpgt(Rtt, Rss). // rss < rtt -> (rtt > rss). -def : Pat <(i1 (setlt DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (CMPGT64rr DoubleRegs:$src2, DoubleRegs:$src1))>; +def : Pat <(i1 (setlt (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (CMPGT64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>; -// Map from cmpltu(Rs, Rd) -> !cmpgtu(Rs, Rd - 1). 
+// Map from cmpltu(Rs, Rd) -> cmpgtu(Rd, Rs) // rs < rt -> rt > rs. -def : Pat <(i1 (setult IntRegs:$src1, IntRegs:$src2)), - (i1 (CMPGTUrr IntRegs:$src2, IntRegs:$src1))>; +// We can let assembler map it, or we can do in the compiler itself. +def : Pat <(i1 (setult (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>; -// Map from cmpltu(Rss, Rdd) -> !cmpgtu(Rss, Rdd - 1). +// Map from cmpltu(Rss, Rdd) -> cmpgtu(Rdd, Rss). // rs < rt -> rt > rs. -def : Pat <(i1 (setult DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1))>; +def : Pat <(i1 (setult (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>; + +// Generate cmpgeu(Rs, #u8) +def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ImmPred:$src2)), + (i1 (CMPGEUri (i32 IntRegs:$src1), u8ImmPred:$src2))>; + +// Generate cmpgtu(Rs, #u9) +def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ImmPred:$src2)), + (i1 (CMPGTUri (i32 IntRegs:$src1), u9ImmPred:$src2))>; // Map from Rs >= Rt -> !(Rt > Rs). // rs >= rt -> !(rt > rs). -def : Pat <(i1 (setuge IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p (CMPGTUrr IntRegs:$src2, IntRegs:$src1)))>; +def : Pat <(i1 (setuge (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src2), (i32 IntRegs:$src1))))>; // Map from Rs >= Rt -> !(Rt > Rs). // rs >= rt -> !(rt > rs). -def : Pat <(i1 (setuge DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p (CMPGTU64rr DoubleRegs:$src2, DoubleRegs:$src1)))>; +def : Pat <(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1))))>; // Map from cmpleu(Rs, Rs) -> !cmpgtu(Rs, Rs). // Map from (Rs <= Rt) -> !(Rs > Rt). 
-def : Pat <(i1 (setule IntRegs:$src1, IntRegs:$src2)), - (i1 (NOT_p (CMPGTUrr IntRegs:$src1, IntRegs:$src2)))>; +def : Pat <(i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))), + (i1 (NOT_p (CMPGTUrr (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>; // Map from cmpleu(Rss, Rtt) -> !cmpgtu(Rss, Rtt-1). // Map from (Rs <= Rt) -> !(Rs > Rt). -def : Pat <(i1 (setule DoubleRegs:$src1, DoubleRegs:$src2)), - (i1 (NOT_p (CMPGTU64rr DoubleRegs:$src1, DoubleRegs:$src2)))>; +def : Pat <(i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (i1 (NOT_p (CMPGTU64rr (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>; // Sign extends. // i1 -> i32 -def : Pat <(i32 (sext PredRegs:$src1)), - (i32 (MUX_ii PredRegs:$src1, -1, 0))>; +def : Pat <(i32 (sext (i1 PredRegs:$src1))), + (i32 (MUX_ii (i1 PredRegs:$src1), -1, 0))>; + +// i1 -> i64 +def : Pat <(i64 (sext (i1 PredRegs:$src1))), + (i64 (COMBINE_rr (TFRI -1), (MUX_ii (i1 PredRegs:$src1), -1, 0)))>; // Convert sign-extended load back to load and sign extend. // i8 -> i64 @@ -2899,16 +3277,16 @@ def: Pat <(i64 (sextloadi32 ADDRriS11_2:$src1)), // Zero extends. 
// i1 -> i32 -def : Pat <(i32 (zext PredRegs:$src1)), - (i32 (MUX_ii PredRegs:$src1, 1, 0))>; +def : Pat <(i32 (zext (i1 PredRegs:$src1))), + (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>; // i1 -> i64 -def : Pat <(i64 (zext PredRegs:$src1)), - (i64 (COMBINE_rr (TFRI 0), (MUX_ii PredRegs:$src1, 1, 0)))>; +def : Pat <(i64 (zext (i1 PredRegs:$src1))), + (i64 (COMBINE_rr (TFRI 0), (MUX_ii (i1 PredRegs:$src1), 1, 0)))>; // i32 -> i64 -def : Pat <(i64 (zext IntRegs:$src1)), - (i64 (COMBINE_rr (TFRI 0), IntRegs:$src1))>; +def : Pat <(i64 (zext (i32 IntRegs:$src1))), + (i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>; // i8 -> i64 def: Pat <(i64 (zextloadi8 ADDRriS11_0:$src1)), @@ -2926,16 +3304,16 @@ def: Pat <(i32 (zextloadi1 ADDRriS11_0:$src1)), (i32 (LDriw ADDRriS11_0:$src1))>; // Map from Rs = Pd to Pd = mux(Pd, #1, #0) -def : Pat <(i32 (zext PredRegs:$src1)), - (i32 (MUX_ii PredRegs:$src1, 1, 0))>; +def : Pat <(i32 (zext (i1 PredRegs:$src1))), + (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>; // Map from Rs = Pd to Pd = mux(Pd, #1, #0) -def : Pat <(i32 (anyext PredRegs:$src1)), - (i32 (MUX_ii PredRegs:$src1, 1, 0))>; +def : Pat <(i32 (anyext (i1 PredRegs:$src1))), + (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))>; // Map from Rss = Pd to Rdd = sxtw (mux(Pd, #1, #0)) -def : Pat <(i64 (anyext PredRegs:$src1)), - (i64 (SXTW (i32 (MUX_ii PredRegs:$src1, 1, 0))))>; +def : Pat <(i64 (anyext (i1 PredRegs:$src1))), + (i64 (SXTW (i32 (MUX_ii (i1 PredRegs:$src1), 1, 0))))>; // Any extended 64-bit load. @@ -2948,75 +3326,103 @@ def: Pat <(i64 (extloadi16 ADDRriS11_2:$src1)), (i64 (COMBINE_rr (TFRI 0), (LDrih ADDRriS11_2:$src1)))>; // Map from Rdd = zxtw(Rs) -> Rdd = combine(0, Rs). -def : Pat<(i64 (zext IntRegs:$src1)), - (i64 (COMBINE_rr (TFRI 0), IntRegs:$src1))>; +def : Pat<(i64 (zext (i32 IntRegs:$src1))), + (i64 (COMBINE_rr (TFRI 0), (i32 IntRegs:$src1)))>; // Multiply 64-bit unsigned and use upper result. 
-def : Pat <(mulhu DoubleRegs:$src1, DoubleRegs:$src2), - (MPYU64_acc(COMBINE_rr (TFRI 0), - (EXTRACT_SUBREG - (LSRd_ri(MPYU64_acc(MPYU64_acc(COMBINE_rr (TFRI 0), - (EXTRACT_SUBREG (LSRd_ri(MPYU64 - (EXTRACT_SUBREG DoubleRegs:$src1, - subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, - subreg_loreg)), - 32) ,subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, - subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src2, - subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)), - 32),subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg) - )>; +def : Pat <(mulhu (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), + (i64 + (MPYU64_acc + (i64 + (COMBINE_rr + (TFRI 0), + (i32 + (EXTRACT_SUBREG + (i64 + (LSRd_ri + (i64 + (MPYU64_acc + (i64 + (MPYU64_acc + (i64 + (COMBINE_rr (TFRI 0), + (i32 + (EXTRACT_SUBREG + (i64 + (LSRd_ri + (i64 + (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)))), 32)), + subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))), + 32)), subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>; // Multiply 64-bit signed and use upper result. 
-def : Pat <(mulhs DoubleRegs:$src1, DoubleRegs:$src2), - (MPY64_acc(COMBINE_rr (TFRI 0), - (EXTRACT_SUBREG - (LSRd_ri(MPY64_acc(MPY64_acc(COMBINE_rr (TFRI 0), - (EXTRACT_SUBREG (LSRd_ri(MPYU64 - (EXTRACT_SUBREG DoubleRegs:$src1, - subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, - subreg_loreg)), - 32) ,subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, - subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src2, - subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)), - 32),subreg_loreg)), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg) - )>; +def : Pat <(mulhs (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), + (i64 + (MPY64_acc + (i64 + (COMBINE_rr (TFRI 0), + (i32 + (EXTRACT_SUBREG + (i64 + (LSRd_ri + (i64 + (MPY64_acc + (i64 + (MPY64_acc + (i64 + (COMBINE_rr (TFRI 0), + (i32 + (EXTRACT_SUBREG + (i64 + (LSRd_ri + (i64 + (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)))), 32)), + subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))), + 32)), subreg_loreg)))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>; // Hexagon specific ISD nodes. -def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>; +//def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>; +def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, + [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; def Hexagon_ADJDYNALLOC : SDNode<"HexagonISD::ADJDYNALLOC", - SDTHexagonADJDYNALLOC>; + SDTHexagonADJDYNALLOC>; // Needed to tag these instructions for stack layout. 
let usesCustomInserter = 1 in def ADJDYNALLOC : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1, s16Imm:$src2), "$dst = add($src1, #$src2)", - [(set IntRegs:$dst, (Hexagon_ADJDYNALLOC IntRegs:$src1, - s16ImmPred:$src2))]>; + [(set (i32 IntRegs:$dst), + (Hexagon_ADJDYNALLOC (i32 IntRegs:$src1), + s16ImmPred:$src2))]>; -def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, []>; +def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>; def Hexagon_ARGEXTEND : SDNode<"HexagonISD::ARGEXTEND", SDTHexagonARGEXTEND>; def ARGEXTEND : ALU32_rr <(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = $src1", - [(set IntRegs:$dst, (Hexagon_ARGEXTEND IntRegs:$src1))]>; + [(set (i32 IntRegs:$dst), + (Hexagon_ARGEXTEND (i32 IntRegs:$src1)))]>; let AddedComplexity = 100 in -def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND IntRegs:$src1), i16)), - (TFR IntRegs:$src1)>; - +def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)), + (COPY (i32 IntRegs:$src1))>; def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>; @@ -3024,12 +3430,91 @@ def HexagonBR_JT: SDNode<"HexagonISD::BR_JT", SDHexagonBR_JT, [SDNPHasChain]>; let isBranch=1, isIndirectBranch=1, isTerminator=1, isBarrier = 1 in def BR_JT : JRInst<(outs), (ins IntRegs:$src), "jumpr $src", - [(HexagonBR_JT IntRegs:$src)]>; + [(HexagonBR_JT (i32 IntRegs:$src))]>; + def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>; def : Pat<(HexagonWrapperJT tjumptable:$dst), - (CONST32_set_jt tjumptable:$dst)>; + (i32 (CONST32_set_jt tjumptable:$dst))>; + +// XTYPE/SHIFT + +// Multi-class for logical operators : +// Shift by immediate/register and accumulate/logical +multiclass xtype_imm<string OpcStr, SDNode OpNode1, SDNode OpNode2> { + def _ri : SInst_acc<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2, u5Imm:$src3), + !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")), + [(set (i32 IntRegs:$dst), + (OpNode2 (i32 
IntRegs:$src1), + (OpNode1 (i32 IntRegs:$src2), + u5ImmPred:$src3)))], + "$src1 = $dst">; + + def d_ri : SInst_acc<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2, u6Imm:$src3), + !strconcat("$dst ", !strconcat(OpcStr, "($src2, #$src3)")), + [(set (i64 DoubleRegs:$dst), (OpNode2 (i64 DoubleRegs:$src1), + (OpNode1 (i64 DoubleRegs:$src2), u6ImmPred:$src3)))], + "$src1 = $dst">; +} + +// Multi-class for logical operators : +// Shift by register and accumulate/logical (32/64 bits) +multiclass xtype_reg<string OpcStr, SDNode OpNode1, SDNode OpNode2> { + def _rr : SInst_acc<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), + !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")), + [(set (i32 IntRegs:$dst), + (OpNode2 (i32 IntRegs:$src1), + (OpNode1 (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], + "$src1 = $dst">; + + def d_rr : SInst_acc<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), + !strconcat("$dst ", !strconcat(OpcStr, "($src2, $src3)")), + [(set (i64 DoubleRegs:$dst), + (OpNode2 (i64 DoubleRegs:$src1), + (OpNode1 (i64 DoubleRegs:$src2), + (i32 IntRegs:$src3))))], + "$src1 = $dst">; + +} + +multiclass basic_xtype_imm<string OpcStr, SDNode OpNode> { +let AddedComplexity = 100 in + defm _ADD : xtype_imm< !strconcat("+= ", OpcStr), OpNode, add>; + defm _SUB : xtype_imm< !strconcat("-= ", OpcStr), OpNode, sub>; + defm _AND : xtype_imm< !strconcat("&= ", OpcStr), OpNode, and>; + defm _OR : xtype_imm< !strconcat("|= ", OpcStr), OpNode, or>; +} + +multiclass basic_xtype_reg<string OpcStr, SDNode OpNode> { +let AddedComplexity = 100 in + defm _ADD : xtype_reg< !strconcat("+= ", OpcStr), OpNode, add>; + defm _SUB : xtype_reg< !strconcat("-= ", OpcStr), OpNode, sub>; + defm _AND : xtype_reg< !strconcat("&= ", OpcStr), OpNode, and>; + defm _OR : xtype_reg< !strconcat("|= ", OpcStr), OpNode, or>; +} + +multiclass xtype_xor_imm<string OpcStr, SDNode OpNode> { +let AddedComplexity = 100 in + defm 
_XOR : xtype_imm< !strconcat("^= ", OpcStr), OpNode, xor>; +} + +defm ASL : basic_xtype_imm<"asl", shl>, basic_xtype_reg<"asl", shl>, + xtype_xor_imm<"asl", shl>; +defm LSR : basic_xtype_imm<"lsr", srl>, basic_xtype_reg<"lsr", srl>, + xtype_xor_imm<"lsr", srl>; + +defm ASR : basic_xtype_imm<"asr", sra>, basic_xtype_reg<"asr", sra>; +defm LSL : basic_xtype_reg<"lsl", shl>; + +// Change the sign of the immediate for Rd=-mpyi(Rs,#u8) +def : Pat <(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)), + (i32 (MPYI_rin (i32 IntRegs:$src1), u8ImmPred:$src2))>; //===----------------------------------------------------------------------===// // V3 Instructions + @@ -3046,3 +3531,19 @@ include "HexagonInstrInfoV3.td" //===----------------------------------------------------------------------===// include "HexagonInstrInfoV4.td" + +//===----------------------------------------------------------------------===// +// V4 Instructions - +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// V5 Instructions + +//===----------------------------------------------------------------------===// + +include "HexagonInstrInfoV5.td" + +//===----------------------------------------------------------------------===// +// V5 Instructions - +//===----------------------------------------------------------------------===// + + diff --git a/lib/Target/Hexagon/HexagonInstrInfoV3.td b/lib/Target/Hexagon/HexagonInstrInfoV3.td index a73897e..157ab3d 100644 --- a/lib/Target/Hexagon/HexagonInstrInfoV3.td +++ b/lib/Target/Hexagon/HexagonInstrInfoV3.td @@ -19,7 +19,7 @@ let isCall = 1, neverHasSideEffects = 1, Defs = [D0, D1, D2, D3, D4, D5, D6, D7, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in { - def CALLv3 : JInst<(outs), (ins calltarget:$dst, variable_ops), + def CALLv3 : JInst<(outs), (ins calltarget:$dst), "call $dst", []>, Requires<[HasV3T]>; } @@ -35,16 +35,17 @@ let isCall = 1, 
neverHasSideEffects = 1, let isCall = 1, neverHasSideEffects = 1, Defs = [D0, D1, D2, D3, D4, D5, D6, D7, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in { - def CALLRv3 : JRInst<(outs), (ins IntRegs:$dst, variable_ops), + def CALLRv3 : JRInst<(outs), (ins IntRegs:$dst), "callr $dst", []>, Requires<[HasV3TOnly]>; } +// Jump to address from register // if(p?.new) jumpr:t r? let isReturn = 1, isTerminator = 1, isBarrier = 1, Defs = [PC], Uses = [R31] in { - def JMPR_cPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), + def JMPR_cdnPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), "if ($src1.new) jumpr:t $src2", []>, Requires<[HasV3T]>; } @@ -52,7 +53,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, // if (!p?.new) jumpr:t r? let isReturn = 1, isTerminator = 1, isBarrier = 1, Defs = [PC], Uses = [R31] in { - def JMPR_cNotPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), + def JMPR_cdnNotPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), "if (!$src1.new) jumpr:t $src2", []>, Requires<[HasV3T]>; } @@ -61,7 +62,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, // if(p?.new) jumpr:nt r? let isReturn = 1, isTerminator = 1, isBarrier = 1, Defs = [PC], Uses = [R31] in { - def JMPR_cPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), + def JMPR_cdnPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), "if ($src1.new) jumpr:nt $src2", []>, Requires<[HasV3T]>; } @@ -69,7 +70,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, // if (!p?.new) jumpr:nt r? 
let isReturn = 1, isTerminator = 1, isBarrier = 1, Defs = [PC], Uses = [R31] in { - def JMPR_cNotPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), + def JMPR_cdnNotPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2), "if (!$src1.new) jumpr:nt $src2", []>, Requires<[HasV3T]>; } @@ -86,20 +87,22 @@ let AddedComplexity = 200 in def MAXw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = max($src2, $src1)", - [(set DoubleRegs:$dst, (select (i1 (setlt DoubleRegs:$src2, - DoubleRegs:$src1)), - DoubleRegs:$src1, - DoubleRegs:$src2))]>, + [(set (i64 DoubleRegs:$dst), + (i64 (select (i1 (setlt (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src1))), + (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2))))]>, Requires<[HasV3T]>; let AddedComplexity = 200 in def MINw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = min($src2, $src1)", - [(set DoubleRegs:$dst, (select (i1 (setgt DoubleRegs:$src2, - DoubleRegs:$src1)), - DoubleRegs:$src1, - DoubleRegs:$src2))]>, + [(set (i64 DoubleRegs:$dst), + (i64 (select (i1 (setgt (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src1))), + (i64 DoubleRegs:$src1), + (i64 DoubleRegs:$src2))))]>, Requires<[HasV3T]>; //===----------------------------------------------------------------------===// @@ -109,25 +112,25 @@ Requires<[HasV3T]>; -//def : Pat <(brcond (i1 (seteq IntRegs:$src1, 0)), bb:$offset), -// (JMP_RegEzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>; +//def : Pat <(brcond (i1 (seteq (i32 IntRegs:$src1), 0)), bb:$offset), +// (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>; -//def : Pat <(brcond (i1 (setne IntRegs:$src1, 0)), bb:$offset), -// (JMP_RegNzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>; +//def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), 0)), bb:$offset), +// (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>; -//def : Pat <(brcond (i1 (setle IntRegs:$src1, 0)), bb:$offset), -// (JMP_RegLezt 
IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>; +//def : Pat <(brcond (i1 (setle (i32 IntRegs:$src1), 0)), bb:$offset), +// (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>; -//def : Pat <(brcond (i1 (setge IntRegs:$src1, 0)), bb:$offset), -// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>; +//def : Pat <(brcond (i1 (setge (i32 IntRegs:$src1), 0)), bb:$offset), +// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>; -//def : Pat <(brcond (i1 (setgt IntRegs:$src1, -1)), bb:$offset), -// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>; +//def : Pat <(brcond (i1 (setgt (i32 IntRegs:$src1), -1)), bb:$offset), +// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>; // Map call instruction -def : Pat<(call IntRegs:$dst), - (CALLRv3 IntRegs:$dst)>, Requires<[HasV3T]>; +def : Pat<(call (i32 IntRegs:$dst)), + (CALLRv3 (i32 IntRegs:$dst))>, Requires<[HasV3T]>; def : Pat<(call tglobaladdr:$dst), (CALLv3 tglobaladdr:$dst)>, Requires<[HasV3T]>; def : Pat<(call texternalsym:$dst), diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td index 9e60cf2..70448fc 100644 --- a/lib/Target/Hexagon/HexagonInstrInfoV4.td +++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td @@ -11,6 +11,12 @@ // //===----------------------------------------------------------------------===// +let neverHasSideEffects = 1 in +def IMMEXT : Immext<(outs), (ins), + "/* immext #... */", + []>, + Requires<[HasV4T]>; + // Hexagon V4 Architecture spec defines 8 instruction classes: // LD ST ALU32 XTYPE J JR MEMOP NV CR SYSTEM(system is not implemented in the // compiler) @@ -250,23 +256,151 @@ def ZXTH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), []>, Requires<[HasV4T]>; +// Generate frame index addresses. 
+let neverHasSideEffects = 1, isReMaterializable = 1 in +def TFR_FI_immext_V4 : ALU32_ri<(outs IntRegs:$dst), + (ins IntRegs:$src1, s32Imm:$offset), + "$dst = add($src1, ##$offset)", + []>, + Requires<[HasV4T]>; + //===----------------------------------------------------------------------===// // ALU32 - //===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// ALU32/PERM + +//===----------------------------------------------------------------------===// + +// Combine +// Rdd=combine(Rs, #s8) +let neverHasSideEffects = 1 in +def COMBINE_ri_V4 : ALU32_ri<(outs DoubleRegs:$dst), + (ins IntRegs:$src1, s8Imm:$src2), + "$dst = combine($src1, #$src2)", + []>, + Requires<[HasV4T]>; +// Rdd=combine(#s8, Rs) +let neverHasSideEffects = 1 in +def COMBINE_ir_V4 : ALU32_ir<(outs DoubleRegs:$dst), + (ins s8Imm:$src1, IntRegs:$src2), + "$dst = combine(#$src1, $src2)", + []>, + Requires<[HasV4T]>; +//===----------------------------------------------------------------------===// +// ALU32/PERM + +//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // LD + //===----------------------------------------------------------------------===// -/// -/// Make sure that in post increment load, the first operand is always the post -/// increment operand. -/// -//// Load doubleword. -// Rdd=memd(Re=#U6) +// +// These absolute set addressing mode instructions accept immediate as +// an operand. We have duplicated these patterns to take global address. 
+ +let neverHasSideEffects = 1 in +def LDrid_abs_setimm_V4 : LDInst2<(outs DoubleRegs:$dst1, IntRegs:$dst2), + (ins u6Imm:$addr), + "$dst1 = memd($dst2=#$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memb(Re=#U6) +let neverHasSideEffects = 1 in +def LDrib_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins u6Imm:$addr), + "$dst1 = memb($dst2=#$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memh(Re=#U6) +let neverHasSideEffects = 1 in +def LDrih_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins u6Imm:$addr), + "$dst1 = memh($dst2=#$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memub(Re=#U6) +let neverHasSideEffects = 1 in +def LDriub_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins u6Imm:$addr), + "$dst1 = memub($dst2=#$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memuh(Re=#U6) +let neverHasSideEffects = 1 in +def LDriuh_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins u6Imm:$addr), + "$dst1 = memuh($dst2=#$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memw(Re=#U6) +let neverHasSideEffects = 1 in +def LDriw_abs_setimm_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins u6Imm:$addr), + "$dst1 = memw($dst2=#$addr)", + []>, + Requires<[HasV4T]>; + +// Following patterns are defined for absolute set addressing mode +// instruction which take global address as operand. 
+let neverHasSideEffects = 1 in +def LDrid_abs_set_V4 : LDInst2<(outs DoubleRegs:$dst1, IntRegs:$dst2), + (ins globaladdress:$addr), + "$dst1 = memd($dst2=##$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memb(Re=#U6) +let neverHasSideEffects = 1 in +def LDrib_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins globaladdress:$addr), + "$dst1 = memb($dst2=##$addr)", + []>, + Requires<[HasV4T]>; +// Rd=memh(Re=#U6) +let neverHasSideEffects = 1 in +def LDrih_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins globaladdress:$addr), + "$dst1 = memh($dst2=##$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memub(Re=#U6) +let neverHasSideEffects = 1 in +def LDriub_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins globaladdress:$addr), + "$dst1 = memub($dst2=##$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memuh(Re=#U6) +let neverHasSideEffects = 1 in +def LDriuh_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins globaladdress:$addr), + "$dst1 = memuh($dst2=##$addr)", + []>, + Requires<[HasV4T]>; + +// Rd=memw(Re=#U6) +let neverHasSideEffects = 1 in +def LDriw_abs_set_V4 : LDInst2<(outs IntRegs:$dst1, IntRegs:$dst2), + (ins globaladdress:$addr), + "$dst1 = memw($dst2=##$addr)", + []>, + Requires<[HasV4T]>; + +// Load doubleword. +// +// Make sure that in post increment load, the first operand is always the post +// increment operand. +// // Rdd=memd(Rs+Rt<<#u2) // Special case pattern for indexed load without offset which is easier to // match. 
AddedComplexity of this pattern should be lower than base+offset load @@ -276,56 +410,58 @@ let AddedComplexity = 10, isPredicable = 1 in def LDrid_indexed_V4 : LDInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memd($src1+$src2<<#0)", - [(set DoubleRegs:$dst, (load (add IntRegs:$src1, - IntRegs:$src2)))]>, + [(set (i64 DoubleRegs:$dst), + (i64 (load (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDrid_indexed_shl_V4 : LDInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memd($src1+$src2<<#$offset)", - [(set DoubleRegs:$dst, (load (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$offset))))]>, + [(set (i64 DoubleRegs:$dst), + (i64 (load (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$offset)))))]>, Requires<[HasV4T]>; //// Load doubleword conditionally. // if ([!]Pv[.new]) Rd=memd(Rs+Rt<<#u2) // if (Pv) Rd=memd(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrid_indexed_cPt_V4 : LDInst<(outs DoubleRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrid_indexed_cPt_V4 : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1) $dst=memd($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv.new) Rd=memd(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrid_indexed_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrid_indexed_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1.new) $dst=memd($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv) Rd=memd(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrid_indexed_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrid_indexed_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), (ins 
PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1) $dst=memd($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv.new) Rd=memd(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrid_indexed_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrid_indexed_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1.new) $dst=memd($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv) Rd=memd(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrid_indexed_shl_cPt_V4 : LDInst<(outs DoubleRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrid_indexed_shl_cPt_V4 : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1) $dst=memd($src2+$src3<<#$offset)", @@ -333,8 +469,8 @@ def LDrid_indexed_shl_cPt_V4 : LDInst<(outs DoubleRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) Rd=memd(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrid_indexed_shl_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrid_indexed_shl_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1.new) $dst=memd($src2+$src3<<#$offset)", @@ -342,8 +478,8 @@ def LDrid_indexed_shl_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst), Requires<[HasV4T]>; // if (!Pv) Rd=memd(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrid_indexed_shl_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrid_indexed_shl_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1) $dst=memd($src2+$src3<<#$offset)", @@ -351,8 +487,8 @@ def LDrid_indexed_shl_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) 
Rd=memd(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrid_indexed_shl_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrid_indexed_shl_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1.new) $dst=memd($src2+$src3<<#$offset)", @@ -362,99 +498,101 @@ def LDrid_indexed_shl_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst), // Rdd=memd(Rt<<#u2+#U6) //// Load byte. -// Rd=memb(Re=#U6) - // Rd=memb(Rs+Rt<<#u2) let AddedComplexity = 10, isPredicable = 1 in def LDrib_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memb($src1+$src2<<#0)", - [(set IntRegs:$dst, (sextloadi8 (add IntRegs:$src1, - IntRegs:$src2)))]>, + [(set (i32 IntRegs:$dst), + (i32 (sextloadi8 (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))))]>, Requires<[HasV4T]>; let AddedComplexity = 10, isPredicable = 1 in def LDriub_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memub($src1+$src2<<#0)", - [(set IntRegs:$dst, (zextloadi8 (add IntRegs:$src1, - IntRegs:$src2)))]>, + [(set (i32 IntRegs:$dst), + (i32 (zextloadi8 (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))))]>, Requires<[HasV4T]>; let AddedComplexity = 10, isPredicable = 1 in def LDriub_ae_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memub($src1+$src2<<#0)", - [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1, - IntRegs:$src2)))]>, + [(set (i32 IntRegs:$dst), + (i32 (extloadi8 (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDrib_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memb($src1+$src2<<#$offset)", - [(set IntRegs:$dst, - (sextloadi8 (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$offset))))]>, + [(set (i32 IntRegs:$dst), + (i32 (sextloadi8 (add (i32 
IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$offset)))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDriub_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memub($src1+$src2<<#$offset)", - [(set IntRegs:$dst, - (zextloadi8 (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$offset))))]>, + [(set (i32 IntRegs:$dst), + (i32 (zextloadi8 (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$offset)))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDriub_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memub($src1+$src2<<#$offset)", - [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$offset))))]>, + [(set (i32 IntRegs:$dst), + (i32 (extloadi8 (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$offset)))))]>, Requires<[HasV4T]>; //// Load byte conditionally. 
// if ([!]Pv[.new]) Rd=memb(Rs+Rt<<#u2) // if (Pv) Rd=memb(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrib_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrib_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1) $dst=memb($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv.new) Rd=memb(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrib_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrib_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1.new) $dst=memb($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv) Rd=memb(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrib_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrib_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1) $dst=memb($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv.new) Rd=memb(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrib_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrib_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1.new) $dst=memb($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv) Rd=memb(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrib_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrib_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1) $dst=memb($src2+$src3<<#$offset)", @@ -462,8 +600,8 @@ def LDrib_indexed_shl_cPt_V4 : LDInst<(outs 
IntRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) Rd=memb(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrib_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrib_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1.new) $dst=memb($src2+$src3<<#$offset)", @@ -471,8 +609,8 @@ def LDrib_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv) Rd=memb(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrib_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrib_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1) $dst=memb($src2+$src3<<#$offset)", @@ -480,8 +618,8 @@ def LDrib_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) Rd=memb(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrib_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrib_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1.new) $dst=memb($src2+$src3<<#$offset)", @@ -491,40 +629,40 @@ def LDrib_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), //// Load unsigned byte conditionally. 
// if ([!]Pv[.new]) Rd=memub(Rs+Rt<<#u2) // if (Pv) Rd=memub(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriub_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriub_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1) $dst=memub($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv.new) Rd=memub(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriub_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriub_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1.new) $dst=memub($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv) Rd=memub(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriub_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriub_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1) $dst=memub($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv.new) Rd=memub(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriub_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriub_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1.new) $dst=memub($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv) Rd=memub(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriub_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriub_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1) $dst=memub($src2+$src3<<#$offset)", @@ -532,8 +670,8 @@ def LDriub_indexed_shl_cPt_V4 
: LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) Rd=memub(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriub_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriub_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1.new) $dst=memub($src2+$src3<<#$offset)", @@ -541,8 +679,8 @@ def LDriub_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv) Rd=memub(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriub_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriub_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1) $dst=memub($src2+$src3<<#$offset)", @@ -550,8 +688,8 @@ def LDriub_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) Rd=memub(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriub_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriub_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1.new) $dst=memub($src2+$src3<<#$offset)", @@ -561,31 +699,32 @@ def LDriub_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), // Rd=memb(Rt<<#u2+#U6) //// Load halfword -// Rd=memh(Re=#U6) - // Rd=memh(Rs+Rt<<#u2) let AddedComplexity = 10, isPredicable = 1 in def LDrih_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memh($src1+$src2<<#0)", - [(set IntRegs:$dst, (sextloadi16 (add IntRegs:$src1, - IntRegs:$src2)))]>, + [(set (i32 IntRegs:$dst), + (i32 (sextloadi16 (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))))]>, Requires<[HasV4T]>; let AddedComplexity = 10, 
isPredicable = 1 in def LDriuh_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memuh($src1+$src2<<#0)", - [(set IntRegs:$dst, (zextloadi16 (add IntRegs:$src1, - IntRegs:$src2)))]>, + [(set (i32 IntRegs:$dst), + (i32 (zextloadi16 (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))))]>, Requires<[HasV4T]>; let AddedComplexity = 10, isPredicable = 1 in def LDriuh_ae_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memuh($src1+$src2<<#0)", - [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1, - IntRegs:$src2)))]>, + [(set (i32 IntRegs:$dst), + (i32 (extloadi16 (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))))]>, Requires<[HasV4T]>; // Rd=memh(Rs+Rt<<#u2) @@ -593,69 +732,69 @@ let AddedComplexity = 40, isPredicable = 1 in def LDrih_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memh($src1+$src2<<#$offset)", - [(set IntRegs:$dst, - (sextloadi16 (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$offset))))]>, + [(set (i32 IntRegs:$dst), + (i32 (sextloadi16 (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$offset)))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDriuh_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memuh($src1+$src2<<#$offset)", - [(set IntRegs:$dst, - (zextloadi16 (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$offset))))]>, + [(set (i32 IntRegs:$dst), + (i32 (zextloadi16 (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$offset)))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDriuh_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memuh($src1+$src2<<#$offset)", - [(set IntRegs:$dst, - (extloadi16 (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$offset))))]>, + [(set (i32 IntRegs:$dst), + (i32 (extloadi16 (add (i32 
IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$offset)))))]>, Requires<[HasV4T]>; //// Load halfword conditionally. // if ([!]Pv[.new]) Rd=memh(Rs+Rt<<#u2) // if (Pv) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrih_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrih_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1) $dst=memh($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv.new) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrih_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrih_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1.new) $dst=memh($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrih_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrih_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1) $dst=memh($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv.new) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDrih_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDrih_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1.new) $dst=memh($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrih_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrih_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, 
u2Imm:$offset), "if ($src1) $dst=memh($src2+$src3<<#$offset)", @@ -663,8 +802,8 @@ def LDrih_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrih_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrih_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1.new) $dst=memh($src2+$src3<<#$offset)", @@ -672,8 +811,8 @@ def LDrih_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrih_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrih_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1) $dst=memh($src2+$src3<<#$offset)", @@ -681,8 +820,8 @@ def LDrih_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDrih_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDrih_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1.new) $dst=memh($src2+$src3<<#$offset)", @@ -692,40 +831,40 @@ def LDrih_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), //// Load unsigned halfword conditionally. 
// if ([!]Pv[.new]) Rd=memuh(Rs+Rt<<#u2) // if (Pv) Rd=memuh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriuh_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriuh_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1) $dst=memuh($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv.new) Rd=memuh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriuh_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriuh_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1.new) $dst=memuh($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv) Rd=memuh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriuh_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriuh_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1) $dst=memuh($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv.new) Rd=memuh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriuh_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriuh_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1.new) $dst=memuh($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv) Rd=memuh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriuh_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriuh_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1) $dst=memuh($src2+$src3<<#$offset)", @@ -733,8 +872,8 @@ def LDriuh_indexed_shl_cPt_V4 
: LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) Rd=memuh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriuh_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriuh_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1.new) $dst=memuh($src2+$src3<<#$offset)", @@ -742,8 +881,8 @@ def LDriuh_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv) Rd=memuh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriuh_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriuh_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1) $dst=memuh($src2+$src3<<#$offset)", @@ -751,8 +890,8 @@ def LDriuh_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) Rd=memuh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1.new) $dst=memuh($src2+$src3<<#$offset)", @@ -762,6 +901,14 @@ def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), // Rd=memh(Rt<<#u2+#U6) //// Load word. +// Load predicate: Fix for bug 5279. 
+let neverHasSideEffects = 1 in +def LDriw_pred_V4 : LDInst2<(outs PredRegs:$dst), + (ins MEMri:$addr), + "Error; should not emit", + []>, + Requires<[HasV4T]>; + // Rd=memw(Re=#U6) // Rd=memw(Rs+Rt<<#u2) @@ -769,8 +916,9 @@ let AddedComplexity = 10, isPredicable = 1 in def LDriw_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memw($src1+$src2<<#0)", - [(set IntRegs:$dst, (load (add IntRegs:$src1, - IntRegs:$src2)))]>, + [(set (i32 IntRegs:$dst), + (i32 (load (add (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))))]>, Requires<[HasV4T]>; // Rd=memw(Rs+Rt<<#u2) @@ -778,48 +926,49 @@ let AddedComplexity = 40, isPredicable = 1 in def LDriw_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memw($src1+$src2<<#$offset)", - [(set IntRegs:$dst, (load (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$offset))))]>, + [(set (i32 IntRegs:$dst), + (i32 (load (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$offset)))))]>, Requires<[HasV4T]>; //// Load word conditionally. 
// if ([!]Pv[.new]) Rd=memw(Rs+Rt<<#u2) // if (Pv) Rd=memw(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriw_indexed_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriw_indexed_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1) $dst=memw($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv.new) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriw_indexed_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriw_indexed_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if ($src1.new) $dst=memw($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriw_indexed_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriw_indexed_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1) $dst=memw($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (!Pv.new) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 15, isPredicated = 1 in -def LDriw_indexed_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 15, isPredicated = 1 in +def LDriw_indexed_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), "if (!$src1.new) $dst=memw($src2+$src3<<#0)", []>, Requires<[HasV4T]>; // if (Pv) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriw_indexed_shl_cPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriw_indexed_shl_cPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1) $dst=memw($src2+$src3<<#$offset)", @@ -827,8 +976,8 @@ def LDriw_indexed_shl_cPt_V4 : LDInst<(outs 
IntRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriw_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriw_indexed_shl_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if ($src1.new) $dst=memw($src2+$src3<<#$offset)", @@ -836,8 +985,8 @@ def LDriw_indexed_shl_cdnPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriw_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriw_indexed_shl_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1) $dst=memw($src2+$src3<<#$offset)", @@ -845,8 +994,8 @@ def LDriw_indexed_shl_cNotPt_V4 : LDInst<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) Rd=memh(Rs+Rt<<#u2) -let mayLoad = 1, AddedComplexity = 45, isPredicated = 1 in -def LDriw_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), +let AddedComplexity = 45, isPredicated = 1 in +def LDriw_indexed_shl_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$offset), "if (!$src1.new) $dst=memw($src2+$src3<<#$offset)", @@ -859,102 +1008,729 @@ def LDriw_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), // Post-inc Load, Predicated, Dot new -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDrid_cdnPt_V4 : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrid_cdnPt_V4 : LDInst2PI<(outs DoubleRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3), "if ($src1.new) $dst1 = memd($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, 
neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDrid_cdnNotPt_V4 : LDInstPI<(outs DoubleRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrid_cdnNotPt_V4 : LDInst2PI<(outs DoubleRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3), "if (!$src1.new) $dst1 = memd($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDrib_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrib_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if ($src1.new) $dst1 = memb($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDrib_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrib_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if (!$src1.new) $dst1 = memb($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDrih_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrih_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if ($src1.new) $dst1 = memh($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDrih_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDrih_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, 
IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if (!$src1.new) $dst1 = memh($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDriub_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriub_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if ($src1.new) $dst1 = memub($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDriub_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriub_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3), "if (!$src1.new) $dst1 = memub($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDriuh_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriuh_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if ($src1.new) $dst1 = memuh($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDriuh_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriuh_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3), "if (!$src1.new) $dst1 = memuh($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def 
POST_LDriw_cdnPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriw_cdnPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3), "if ($src1.new) $dst1 = memw($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; -let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in -def POST_LDriw_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), +let hasCtrlDep = 1, neverHasSideEffects = 1, isPredicated = 1 in +def POST_LDriw_cdnNotPt_V4 : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2), (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3), "if (!$src1.new) $dst1 = memw($src2++#$src3)", [], "$src2 = $dst2">, Requires<[HasV4T]>; +/// Load from global offset + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDrid_GP_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins globaladdress:$global, u16Imm:$offset), + "$dst=memd(#$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_GP_cPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1) $dst=memd(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_GP_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1) $dst=memd(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_GP_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1.new) $dst=memd(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrid_GP_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1.new) $dst=memd(##$global+$offset)", + 
[]>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDrib_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global, u16Imm:$offset), + "$dst=memb(#$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1) $dst=memb(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1) $dst=memb(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1.new) $dst=memb(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrib_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1.new) $dst=memb(##$global+$offset)", + []>, + Requires<[HasV4T]>; + + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDriub_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global, u16Imm:$offset), + "$dst=memub(#$global+$offset)", + []>, + Requires<[HasV4T]>; + + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1) $dst=memub(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1) $dst=memub(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def 
LDriub_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1.new) $dst=memub(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriub_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1.new) $dst=memub(##$global+$offset)", + []>, + Requires<[HasV4T]>; + + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDrih_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global, u16Imm:$offset), + "$dst=memh(#$global+$offset)", + []>, + Requires<[HasV4T]>; + + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1) $dst=memh(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1) $dst=memh(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1.new) $dst=memh(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDrih_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1.new) $dst=memh(##$global+$offset)", + []>, + Requires<[HasV4T]>; + + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDriuh_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global, u16Imm:$offset), + "$dst=memuh(#$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, 
globaladdress:$global, u16Imm:$offset), + "if ($src1) $dst=memuh(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1) $dst=memuh(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1.new) $dst=memuh(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriuh_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1.new) $dst=memuh(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDriw_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global, u16Imm:$offset), + "$dst=memw(#$global+$offset)", + []>, + Requires<[HasV4T]>; + + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1) $dst=memw(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if (!$src1) $dst=memw(##$global+$offset)", + []>, + Requires<[HasV4T]>; + + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if ($src1.new) $dst=memw(##$global+$offset)", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def LDriw_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), + "if 
(!$src1.new) $dst=memw(##$global+$offset)", + []>, + Requires<[HasV4T]>; + + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDd_GP_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins globaladdress:$global), + "$dst=memd(#$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rtt=memd(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDd_GP_cPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1) $dst=memd(##$global)", + []>, + Requires<[HasV4T]>; + + +// if (!Pv) Rtt=memd(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDd_GP_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1) $dst=memd(##$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rtt=memd(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDd_GP_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1.new) $dst=memd(##$global)", + []>, + Requires<[HasV4T]>; + + +// if (!Pv) Rtt=memd(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDd_GP_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1.new) $dst=memd(##$global)", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDb_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global), + "$dst=memb(#$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memb(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDb_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1) $dst=memb(##$global)", + []>, + Requires<[HasV4T]>; + +// if (!Pv) Rt=memb(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDb_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1) $dst=memb(##$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memb(##global) +let 
neverHasSideEffects = 1, isPredicated = 1 in +def LDb_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1.new) $dst=memb(##$global)", + []>, + Requires<[HasV4T]>; + +// if (!Pv) Rt=memb(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDb_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1.new) $dst=memb(##$global)", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDub_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global), + "$dst=memub(#$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memub(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDub_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1) $dst=memub(##$global)", + []>, + Requires<[HasV4T]>; + + +// if (!Pv) Rt=memub(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDub_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1) $dst=memub(##$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memub(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDub_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1.new) $dst=memub(##$global)", + []>, + Requires<[HasV4T]>; + + +// if (!Pv) Rt=memub(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDub_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1.new) $dst=memub(##$global)", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDh_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global), + "$dst=memh(#$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memh(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDh_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins 
PredRegs:$src1, globaladdress:$global), + "if ($src1) $dst=memh(##$global)", + []>, + Requires<[HasV4T]>; + +// if (!Pv) Rt=memh(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDh_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1) $dst=memh(##$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memh(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDh_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1.new) $dst=memh(##$global)", + []>, + Requires<[HasV4T]>; + +// if (!Pv) Rt=memh(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDh_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1.new) $dst=memh(##$global)", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDuh_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global), + "$dst=memuh(#$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memuh(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDuh_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1) $dst=memuh(##$global)", + []>, + Requires<[HasV4T]>; + +// if (!Pv) Rt=memuh(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDuh_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1) $dst=memuh(##$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memuh(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDuh_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1.new) $dst=memuh(##$global)", + []>, + Requires<[HasV4T]>; + +// if (!Pv) Rt=memuh(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDuh_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if 
(!$src1.new) $dst=memuh(##$global)", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def LDw_GP_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$global), + "$dst=memw(#$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memw(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDw_GP_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1) $dst=memw(##$global)", + []>, + Requires<[HasV4T]>; + + +// if (!Pv) Rt=memw(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDw_GP_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1) $dst=memw(##$global)", + []>, + Requires<[HasV4T]>; + +// if (Pv) Rt=memw(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDw_GP_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if ($src1.new) $dst=memw(##$global)", + []>, + Requires<[HasV4T]>; + + +// if (!Pv) Rt=memw(##global) +let neverHasSideEffects = 1, isPredicated = 1 in +def LDw_GP_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$global), + "if (!$src1.new) $dst=memw(##$global)", + []>, + Requires<[HasV4T]>; + + + +def : Pat <(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)), + (i64 (LDd_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +def : Pat <(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDw_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +def : Pat <(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDuh_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +def : Pat <(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)), + (i32 (LDub_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress) -> memw(#foo + 0) +let AddedComplexity = 100 in +def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))), + (i64 (LDd_GP_V4 tglobaladdr:$global))>, + 
Requires<[HasV4T]>; + +// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd +let AddedComplexity = 100 in +def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))), + (i1 (TFR_PdRs (i32 (LDb_GP_V4 tglobaladdr:$global))))>, + Requires<[HasV4T]>; + +// When the Interprocedural Global Variable optimizer realizes that a certain +// global variable takes only two constant values, it shrinks the global to +// a boolean. Catch those loads here in the following 3 patterns. +let AddedComplexity = 100 in +def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +let AddedComplexity = 100 in +def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress) -> memb(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress) -> memb(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDb_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDub_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress) -> memub(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDub_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress) -> memh(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (extloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDh_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress) -> memh(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP 
tglobaladdr:$global))), + (i32 (LDh_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress) -> memuh(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDuh_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress) -> memw(#foo) +let AddedComplexity = 100 in +def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))), + (i32 (LDw_GP_V4 tglobaladdr:$global))>, + Requires<[HasV4T]>; + +def : Pat <(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i64 (LDrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +def : Pat <(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +def : Pat <(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriuh_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +def : Pat <(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (i32 (LDriub_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress + x) -> memd(#foo + x) +let AddedComplexity = 100 in +def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i64 (LDrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress + x) -> memb(#foo + x) +let AddedComplexity = 100 in +def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress + x) -> memb(#foo + x) +let AddedComplexity = 100 in +def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 
(LDrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress + x) -> memub(#foo + x) +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriub_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress + x) -> memuh(#foo + x) +let AddedComplexity = 100 in +def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress + x) -> memh(#foo + x) +let AddedComplexity = 100 in +def : Pat <(i32 (sextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + + +// Map from load(globaladdress + x) -> memuh(#foo + x) +let AddedComplexity = 100 in +def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriuh_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + +// Map from load(globaladdress + x) -> memw(#foo + x) +let AddedComplexity = 100 in +def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset))), + (i32 (LDriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, + Requires<[HasV4T]>; + //===----------------------------------------------------------------------===// // LD - @@ -971,18 +1747,70 @@ def POST_LDriw_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), /// last operand. /// -// Store doubleword. 
// memd(Re=#U6)=Rtt -// TODO: needs to be implemented +def STrid_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1), + (ins DoubleRegs:$src1, u6Imm:$src2), + "memd($dst1=#$src2) = $src1", + []>, + Requires<[HasV4T]>; + +// memb(Re=#U6)=Rs +def STrib_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1), + (ins IntRegs:$src1, u6Imm:$src2), + "memb($dst1=#$src2) = $src1", + []>, + Requires<[HasV4T]>; + +// memh(Re=#U6)=Rs +def STrih_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1), + (ins IntRegs:$src1, u6Imm:$src2), + "memh($dst1=#$src2) = $src1", + []>, + Requires<[HasV4T]>; + +// memw(Re=#U6)=Rs +def STriw_abs_setimm_V4 : STInst2<(outs IntRegs:$dst1), + (ins IntRegs:$src1, u6Imm:$src2), + "memw($dst1=#$src2) = $src1", + []>, + Requires<[HasV4T]>; + +// memd(Re=#U6)=Rtt +def STrid_abs_set_V4 : STInst2<(outs IntRegs:$dst1), + (ins DoubleRegs:$src1, globaladdress:$src2), + "memd($dst1=##$src2) = $src1", + []>, + Requires<[HasV4T]>; + +// memb(Re=#U6)=Rs +def STrib_abs_set_V4 : STInst2<(outs IntRegs:$dst1), + (ins IntRegs:$src1, globaladdress:$src2), + "memb($dst1=##$src2) = $src1", + []>, + Requires<[HasV4T]>; + +// memh(Re=#U6)=Rs +def STrih_abs_set_V4 : STInst2<(outs IntRegs:$dst1), + (ins IntRegs:$src1, globaladdress:$src2), + "memh($dst1=##$src2) = $src1", + []>, + Requires<[HasV4T]>; + +// memw(Re=#U6)=Rs +def STriw_abs_set_V4 : STInst2<(outs IntRegs:$dst1), + (ins IntRegs:$src1, globaladdress:$src2), + "memw($dst1=##$src2) = $src1", + []>, + Requires<[HasV4T]>; -// memd(Rs+#s11:3)=Rtt // memd(Rs+Ru<<#u2)=Rtt let AddedComplexity = 10, isPredicable = 1 in def STrid_indexed_shl_V4 : STInst<(outs), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, DoubleRegs:$src4), "memd($src1+$src2<<#$src3) = $src4", - [(store DoubleRegs:$src4, (add IntRegs:$src1, - (shl IntRegs:$src2, u2ImmPred:$src3)))]>, + [(store (i64 DoubleRegs:$src4), + (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), u2ImmPred:$src3)))]>, Requires<[HasV4T]>; // memd(Ru<<#u2+#U6)=Rtt @@ -990,9 +1818,9 @@ let 
AddedComplexity = 10 in def STrid_shl_V4 : STInst<(outs), (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, DoubleRegs:$src4), "memd($src1<<#$src2+#$src3) = $src4", - [(store DoubleRegs:$src4, (shl IntRegs:$src1, - (add u2ImmPred:$src2, - u6ImmPred:$src3)))]>, + [(store (i64 DoubleRegs:$src4), + (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), + u6ImmPred:$src3))]>, Requires<[HasV4T]>; // memd(Rx++#s4:3)=Rtt @@ -1009,8 +1837,9 @@ def STrid_shl_V4 : STInst<(outs), // if ([!]Pv[.new]) memd(Rs+#u6:3)=Rtt // if (Pv) memd(Rs+#u6:3)=Rtt // if (Pv.new) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_cdnPt_V4 : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2), "if ($src1.new) memd($addr) = $src2", []>, @@ -1018,8 +1847,9 @@ def STrid_cdnPt_V4 : STInst<(outs), // if (!Pv) memd(Rs+#u6:3)=Rtt // if (!Pv.new) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_cdnNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2), "if (!$src1.new) memd($addr) = $src2", []>, @@ -1027,8 +1857,9 @@ def STrid_cdnNotPt_V4 : STInst<(outs), // if (Pv) memd(Rs+#u6:3)=Rtt // if (Pv.new) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_indexed_cdnPt_V4 : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_indexed_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3, DoubleRegs:$src4), "if ($src1.new) memd($src2+#$src3) = $src4", @@ -1037,8 +1868,9 @@ def STrid_indexed_cdnPt_V4 : STInst<(outs), // if (!Pv) memd(Rs+#u6:3)=Rtt // if (!Pv.new) memd(Rs+#u6:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in 
-def STrid_indexed_cdnNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_indexed_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3, DoubleRegs:$src4), "if (!$src1.new) memd($src2+#$src3) = $src4", @@ -1047,8 +1879,9 @@ def STrid_indexed_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memd(Rs+Ru<<#u2)=Rtt // if (Pv) memd(Rs+Ru<<#u2)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_indexed_shl_cPt_V4 : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_indexed_shl_cPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, DoubleRegs:$src5), "if ($src1) memd($src2+$src3<<#$src4) = $src5", @@ -1056,24 +1889,27 @@ def STrid_indexed_shl_cPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (Pv.new) memd(Rs+Ru<<#u2)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_indexed_shl_cdnPt_V4 : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_indexed_shl_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, DoubleRegs:$src5), - "if ($src1) memd($src2+$src3<<#$src4) = $src5", + "if ($src1.new) memd($src2+$src3<<#$src4) = $src5", []>, Requires<[HasV4T]>; // if (!Pv) memd(Rs+Ru<<#u2)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_indexed_shl_cNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_indexed_shl_cNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, DoubleRegs:$src5), "if (!$src1) memd($src2+$src3<<#$src4) = $src5", []>, Requires<[HasV4T]>; // if (!Pv.new) memd(Rs+Ru<<#u2)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, 
neverHasSideEffects = 1, + isPredicated = 1 in +def STrid_indexed_shl_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, DoubleRegs:$src5), "if (!$src1.new) memd($src2+$src3<<#$src4) = $src5", @@ -1083,8 +1919,9 @@ def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memd(Rx++#s4:3)=Rtt // if (Pv) memd(Rx++#s4:3)=Rtt // if (Pv.new) memd(Rx++#s4:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def POST_STdri_cdnPt_V4 : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3, s4_3Imm:$offset), "if ($src1.new) memd($src3++#$offset) = $src2", @@ -1094,8 +1931,9 @@ def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst), // if (!Pv) memd(Rx++#s4:3)=Rtt // if (!Pv.new) memd(Rx++#s4:3)=Rtt -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in -def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst), +let AddedComplexity = 10, neverHasSideEffects = 1, + isPredicated = 1 in +def POST_STdri_cdnNotPt_V4 : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3, s4_3Imm:$offset), "if (!$src1.new) memd($src3++#$offset) = $src2", @@ -1105,15 +1943,12 @@ def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst), // Store byte. -// memb(Re=#U6)=Rt -// TODO: needs to be implemented. 
-// memb(Rs+#s11:0)=Rt // memb(Rs+#u6:0)=#S8 let AddedComplexity = 10, isPredicable = 1 in def STrib_imm_V4 : STInst<(outs), (ins IntRegs:$src1, u6_0Imm:$src2, s8Imm:$src3), "memb($src1+#$src2) = #$src3", - [(truncstorei8 s8ImmPred:$src3, (add IntRegs:$src1, + [(truncstorei8 s8ImmPred:$src3, (add (i32 IntRegs:$src1), u6_0ImmPred:$src2))]>, Requires<[HasV4T]>; @@ -1122,9 +1957,10 @@ let AddedComplexity = 10, isPredicable = 1 in def STrib_indexed_shl_V4 : STInst<(outs), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4), "memb($src1+$src2<<#$src3) = $src4", - [(truncstorei8 IntRegs:$src4, (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$src3)))]>, + [(truncstorei8 (i32 IntRegs:$src4), + (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$src3)))]>, Requires<[HasV4T]>; // memb(Ru<<#u2+#U6)=Rt @@ -1132,9 +1968,9 @@ let AddedComplexity = 10 in def STrib_shl_V4 : STInst<(outs), (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4), "memb($src1<<#$src2+#$src3) = $src4", - [(truncstorei8 IntRegs:$src4, (shl IntRegs:$src1, - (add u2ImmPred:$src2, - u6ImmPred:$src3)))]>, + [(truncstorei8 (i32 IntRegs:$src4), + (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), + u6ImmPred:$src3))]>, Requires<[HasV4T]>; // memb(Rx++#s4:0:circ(Mu))=Rt @@ -1148,32 +1984,36 @@ def STrib_shl_V4 : STInst<(outs), // if ([!]Pv[.new]) memb(#u6)=Rt // if ([!]Pv[.new]) memb(Rs+#u6:0)=#S6 // if (Pv) memb(Rs+#u6:0)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_imm_cPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrib_imm_cPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4), "if ($src1) memb($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (Pv.new) memb(Rs+#u6:0)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_imm_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrib_imm_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, 
IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4), "if ($src1.new) memb($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (!Pv) memb(Rs+#u6:0)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_imm_cNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrib_imm_cNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4), "if (!$src1) memb($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (!Pv.new) memb(Rs+#u6:0)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_imm_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrib_imm_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4), "if (!$src1.new) memb($src2+#$src3) = #$src4", []>, @@ -1182,8 +2022,9 @@ def STrib_imm_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memb(Rs+#u6:0)=Rt // if (Pv) memb(Rs+#u6:0)=Rt // if (Pv.new) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrib_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1.new) memb($addr) = $src2", []>, @@ -1191,8 +2032,9 @@ def STrib_cdnPt_V4 : STInst<(outs), // if (!Pv) memb(Rs+#u6:0)=Rt // if (!Pv.new) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrib_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1.new) memb($addr) = $src2", []>, @@ -1201,16 +2043,18 @@ def STrib_cdnNotPt_V4 : STInst<(outs), // if (Pv) memb(Rs+#u6:0)=Rt // if (!Pv) memb(Rs+#u6:0)=Rt // if (Pv.new) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_indexed_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrib_indexed_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, 
IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if ($src1.new) memb($src2+#$src3) = $src4", []>, Requires<[HasV4T]>; // if (!Pv.new) memb(Rs+#u6:0)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrib_indexed_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrib_indexed_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if (!$src1.new) memb($src2+#$src3) = $src4", []>, @@ -1218,8 +2062,9 @@ def STrib_indexed_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Rt // if (Pv) memb(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STrib_indexed_shl_cPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STrib_indexed_shl_cPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if ($src1) memb($src2+$src3<<#$src4) = $src5", @@ -1227,8 +2072,9 @@ def STrib_indexed_shl_cPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (Pv.new) memb(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STrib_indexed_shl_cdnPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STrib_indexed_shl_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if ($src1.new) memb($src2+$src3<<#$src4) = $src5", @@ -1236,8 +2082,9 @@ def STrib_indexed_shl_cdnPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (!Pv) memb(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STrib_indexed_shl_cNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STrib_indexed_shl_cNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if (!$src1) memb($src2+$src3<<#$src4) = $src5", @@ -1245,8 +2092,9 @@ def STrib_indexed_shl_cNotPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (!Pv.new) memb(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def 
STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STrib_indexed_shl_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if (!$src1.new) memb($src2+$src3<<#$src4) = $src5", @@ -1256,8 +2104,9 @@ def STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memb(Rx++#s4:0)=Rt // if (Pv) memb(Rx++#s4:0)=Rt // if (Pv.new) memb(Rx++#s4:0)=Rt -let mayStore = 1, hasCtrlDep = 1 in -def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, + isPredicated = 1 in +def POST_STbri_cdnPt_V4 : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset), "if ($src1.new) memb($src3++#$offset) = $src2", [],"$src3 = $dst">, @@ -1265,8 +2114,9 @@ def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst), // if (!Pv) memb(Rx++#s4:0)=Rt // if (!Pv.new) memb(Rx++#s4:0)=Rt -let mayStore = 1, hasCtrlDep = 1 in -def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, + isPredicated = 1 in +def POST_STbri_cdnNotPt_V4 : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset), "if (!$src1.new) memb($src3++#$offset) = $src2", [],"$src3 = $dst">, @@ -1274,20 +2124,15 @@ def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst), // Store halfword. 
-// memh(Re=#U6)=Rt.H -// TODO: needs to be implemented - -// memh(Re=#U6)=Rt // TODO: needs to be implemented - +// memh(Re=#U6)=Rt.H // memh(Rs+#s11:1)=Rt.H -// memh(Rs+#s11:1)=Rt // memh(Rs+#u6:1)=#S8 let AddedComplexity = 10, isPredicable = 1 in def STrih_imm_V4 : STInst<(outs), (ins IntRegs:$src1, u6_1Imm:$src2, s8Imm:$src3), "memh($src1+#$src2) = #$src3", - [(truncstorei16 s8ImmPred:$src3, (add IntRegs:$src1, + [(truncstorei16 s8ImmPred:$src3, (add (i32 IntRegs:$src1), u6_1ImmPred:$src2))]>, Requires<[HasV4T]>; @@ -1299,9 +2144,10 @@ let AddedComplexity = 10, isPredicable = 1 in def STrih_indexed_shl_V4 : STInst<(outs), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4), "memh($src1+$src2<<#$src3) = $src4", - [(truncstorei16 IntRegs:$src4, (add IntRegs:$src1, - (shl IntRegs:$src2, - u2ImmPred:$src3)))]>, + [(truncstorei16 (i32 IntRegs:$src4), + (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$src3)))]>, Requires<[HasV4T]>; // memh(Ru<<#u2+#U6)=Rt.H @@ -1310,9 +2156,9 @@ let AddedComplexity = 10 in def STrih_shl_V4 : STInst<(outs), (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4), "memh($src1<<#$src2+#$src3) = $src4", - [(truncstorei16 IntRegs:$src4, (shl IntRegs:$src1, - (add u2ImmPred:$src2, - u6ImmPred:$src3)))]>, + [(truncstorei16 (i32 IntRegs:$src4), + (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), + u6ImmPred:$src3))]>, Requires<[HasV4T]>; // memh(Rx++#s4:1:circ(Mu))=Rt.H @@ -1323,42 +2169,42 @@ def STrih_shl_V4 : STInst<(outs), // memh(Rx++Mu)=Rt // memh(Rx++Mu:brev)=Rt.H // memh(Rx++Mu:brev)=Rt -// memh(gp+#u16:1)=Rt.H // memh(gp+#u16:1)=Rt - - -// Store halfword conditionally. 
// if ([!]Pv[.new]) memh(#u6)=Rt.H // if ([!]Pv[.new]) memh(#u6)=Rt // if ([!]Pv[.new]) memh(Rs+#u6:1)=#S6 // if (Pv) memh(Rs+#u6:1)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_imm_cPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrih_imm_cPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4), "if ($src1) memh($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (Pv.new) memh(Rs+#u6:1)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_imm_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrih_imm_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4), "if ($src1.new) memh($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (!Pv) memh(Rs+#u6:1)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_imm_cNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrih_imm_cNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4), "if (!$src1) memh($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (!Pv.new) memh(Rs+#u6:1)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_imm_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrih_imm_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4), "if (!$src1.new) memh($src2+#$src3) = #$src4", []>, @@ -1370,8 +2216,9 @@ def STrih_imm_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memh(Rs+#u6:1)=Rt // if (Pv) memh(Rs+#u6:1)=Rt // if (Pv.new) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrih_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1.new) memh($addr) = $src2", []>, @@ -1379,24 +2226,27 @@ def STrih_cdnPt_V4 : STInst<(outs), // if (!Pv) 
memh(Rs+#u6:1)=Rt // if (!Pv.new) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrih_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1.new) memh($addr) = $src2", []>, Requires<[HasV4T]>; // if (Pv.new) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_indexed_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrih_indexed_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if ($src1.new) memh($src2+#$src3) = $src4", []>, Requires<[HasV4T]>; // if (!Pv.new) memh(Rs+#u6:1)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STrih_indexed_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STrih_indexed_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if (!$src1.new) memh($src2+#$src3) = $src4", []>, @@ -1405,8 +2255,9 @@ def STrih_indexed_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt.H // if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt // if (Pv) memh(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STrih_indexed_shl_cPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STrih_indexed_shl_cPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if ($src1) memh($src2+$src3<<#$src4) = $src5", @@ -1414,7 +2265,9 @@ def STrih_indexed_shl_cPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (Pv.new) memh(Rs+Ru<<#u2)=Rt -def STrih_indexed_shl_cdnPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STrih_indexed_shl_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if ($src1.new) memh($src2+$src3<<#$src4) = $src5", @@ -1422,8 +2275,9 @@ def 
STrih_indexed_shl_cdnPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (!Pv) memh(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STrih_indexed_shl_cNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STrih_indexed_shl_cNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if (!$src1) memh($src2+$src3<<#$src4) = $src5", @@ -1431,8 +2285,9 @@ def STrih_indexed_shl_cNotPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (!Pv.new) memh(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STrih_indexed_shl_cdnNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STrih_indexed_shl_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if (!$src1.new) memh($src2+$src3<<#$src4) = $src5", @@ -1445,8 +2300,9 @@ def STrih_indexed_shl_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memh(Rx++#s4:1)=Rt // if (Pv) memh(Rx++#s4:1)=Rt // if (Pv.new) memh(Rx++#s4:1)=Rt -let mayStore = 1, hasCtrlDep = 1 in -def POST_SThri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, + isPredicated = 1 in +def POST_SThri_cdnPt_V4 : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset), "if ($src1.new) memh($src3++#$offset) = $src2", [],"$src3 = $dst">, @@ -1454,8 +2310,9 @@ def POST_SThri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst), // if (!Pv) memh(Rx++#s4:1)=Rt // if (!Pv.new) memh(Rx++#s4:1)=Rt -let mayStore = 1, hasCtrlDep = 1 in -def POST_SThri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, + isPredicated = 1 in +def POST_SThri_cdnNotPt_V4 : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset), "if (!$src1.new) memh($src3++#$offset) = $src2", [],"$src3 = $dst">, @@ -1466,13 +2323,22 @@ def POST_SThri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst), // memw(Re=#U6)=Rt // TODO: Needs to be 
implemented. -// memw(Rs+#s11:2)=Rt +// Store predicate: +let neverHasSideEffects = 1 in +def STriw_pred_V4 : STInst2<(outs), + (ins MEMri:$addr, PredRegs:$src1), + "Error; should not emit", + []>, + Requires<[HasV4T]>; + + // memw(Rs+#u6:2)=#S8 let AddedComplexity = 10, isPredicable = 1 in def STriw_imm_V4 : STInst<(outs), (ins IntRegs:$src1, u6_2Imm:$src2, s8Imm:$src3), "memw($src1+#$src2) = #$src3", - [(store s8ImmPred:$src3, (add IntRegs:$src1, u6_2ImmPred:$src2))]>, + [(store s8ImmPred:$src3, (add (i32 IntRegs:$src1), + u6_2ImmPred:$src2))]>, Requires<[HasV4T]>; // memw(Rs+Ru<<#u2)=Rt @@ -1480,8 +2346,9 @@ let AddedComplexity = 10, isPredicable = 1 in def STriw_indexed_shl_V4 : STInst<(outs), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4), "memw($src1+$src2<<#$src3) = $src4", - [(store IntRegs:$src4, (add IntRegs:$src1, - (shl IntRegs:$src2, u2ImmPred:$src3)))]>, + [(store (i32 IntRegs:$src4), (add (i32 IntRegs:$src1), + (shl (i32 IntRegs:$src2), + u2ImmPred:$src3)))]>, Requires<[HasV4T]>; // memw(Ru<<#u2+#U6)=Rt @@ -1489,8 +2356,9 @@ let AddedComplexity = 10 in def STriw_shl_V4 : STInst<(outs), (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4), "memw($src1<<#$src2+#$src3) = $src4", - [(store IntRegs:$src4, (shl IntRegs:$src1, - (add u2ImmPred:$src2, u6ImmPred:$src3)))]>, + [(store (i32 IntRegs:$src4), + (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), + u6ImmPred:$src3))]>, Requires<[HasV4T]>; // memw(Rx++#s4:2)=Rt @@ -1502,37 +2370,39 @@ def STriw_shl_V4 : STInst<(outs), // Store word conditionally. -// if ([!]Pv[.new]) memw(#u6)=Rt -// TODO: Needs to be implemented. 
// if ([!]Pv[.new]) memw(Rs+#u6:2)=#S6 // if (Pv) memw(Rs+#u6:2)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_imm_cPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STriw_imm_cPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4), "if ($src1) memw($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (Pv.new) memw(Rs+#u6:2)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_imm_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STriw_imm_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4), "if ($src1.new) memw($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (!Pv) memw(Rs+#u6:2)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_imm_cNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STriw_imm_cNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4), "if (!$src1) memw($src2+#$src3) = #$src4", []>, Requires<[HasV4T]>; // if (!Pv.new) memw(Rs+#u6:2)=#S6 -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_imm_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STriw_imm_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4), "if (!$src1.new) memw($src2+#$src3) = #$src4", []>, @@ -1541,8 +2411,9 @@ def STriw_imm_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memw(Rs+#u6:2)=Rt // if (Pv) memw(Rs+#u6:2)=Rt // if (Pv.new) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STriw_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1.new) memw($addr) = $src2", []>, @@ -1550,8 +2421,9 @@ def STriw_cdnPt_V4 : STInst<(outs), // if (!Pv) memw(Rs+#u6:2)=Rt // if (!Pv.new) memw(Rs+#u6:2)=Rt -let mayStore = 1, 
neverHasSideEffects = 1 in -def STriw_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STriw_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1.new) memw($addr) = $src2", []>, @@ -1560,16 +2432,18 @@ def STriw_cdnNotPt_V4 : STInst<(outs), // if (Pv) memw(Rs+#u6:2)=Rt // if (!Pv) memw(Rs+#u6:2)=Rt // if (Pv.new) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_indexed_cdnPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STriw_indexed_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if ($src1.new) memw($src2+#$src3) = $src4", []>, Requires<[HasV4T]>; // if (!Pv.new) memw(Rs+#u6:2)=Rt -let mayStore = 1, neverHasSideEffects = 1 in -def STriw_indexed_cdnNotPt_V4 : STInst<(outs), +let neverHasSideEffects = 1, + isPredicated = 1 in +def STriw_indexed_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if (!$src1.new) memw($src2+#$src3) = $src4", []>, @@ -1577,8 +2451,9 @@ def STriw_indexed_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Rt // if (Pv) memw(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STriw_indexed_shl_cPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STriw_indexed_shl_cPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if ($src1) memw($src2+$src3<<#$src4) = $src5", @@ -1586,8 +2461,9 @@ def STriw_indexed_shl_cPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (Pv.new) memw(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STriw_indexed_shl_cdnPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STriw_indexed_shl_cdnPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if ($src1.new) memw($src2+$src3<<#$src4) = $src5", @@ -1595,8 
+2471,9 @@ def STriw_indexed_shl_cdnPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (!Pv) memw(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STriw_indexed_shl_cNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STriw_indexed_shl_cNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if (!$src1) memw($src2+$src3<<#$src4) = $src5", @@ -1604,8 +2481,9 @@ def STriw_indexed_shl_cNotPt_V4 : STInst<(outs), Requires<[HasV4T]>; // if (!Pv.new) memw(Rs+Ru<<#u2)=Rt -let mayStore = 1, AddedComplexity = 10 in -def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs), +let AddedComplexity = 10, + isPredicated = 1 in +def STriw_indexed_shl_cdnNotPt_V4 : STInst2<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), "if (!$src1.new) memw($src2+$src3<<#$src4) = $src5", @@ -1615,8 +2493,9 @@ def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs), // if ([!]Pv[.new]) memw(Rx++#s4:2)=Rt // if (Pv) memw(Rx++#s4:2)=Rt // if (Pv.new) memw(Rx++#s4:2)=Rt -let mayStore = 1, hasCtrlDep = 1 in -def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, + isPredicated = 1 in +def POST_STwri_cdnPt_V4 : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset), "if ($src1.new) memw($src3++#$offset) = $src2", [],"$src3 = $dst">, @@ -1624,14 +2503,456 @@ def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst), // if (!Pv) memw(Rx++#s4:2)=Rt // if (!Pv.new) memw(Rx++#s4:2)=Rt -let mayStore = 1, hasCtrlDep = 1 in -def POST_STwri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst), +let hasCtrlDep = 1, + isPredicated = 1 in +def POST_STwri_cdnNotPt_V4 : STInst2PI<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset), "if (!$src1.new) memw($src3++#$offset) = $src2", [],"$src3 = $dst">, Requires<[HasV4T]>; +/// store to global address + +let isPredicable = 1, neverHasSideEffects = 1 in +def 
STrid_GP_V4 : STInst2<(outs), + (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src), + "memd(#$global+$offset) = $src", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrid_GP_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + DoubleRegs:$src2), + "if ($src1) memd(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrid_GP_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + DoubleRegs:$src2), + "if (!$src1) memd(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrid_GP_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + DoubleRegs:$src2), + "if ($src1.new) memd(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrid_GP_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + DoubleRegs:$src2), + "if (!$src1.new) memd(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def STrib_GP_V4 : STInst2<(outs), + (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), + "memb(#$global+$offset) = $src", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrib_GP_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1) memb(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrib_GP_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1) memb(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrib_GP_cdnPt_V4 : STInst2<(outs), + (ins 
PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1.new) memb(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrib_GP_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1.new) memb(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def STrih_GP_V4 : STInst2<(outs), + (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), + "memh(#$global+$offset) = $src", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrih_GP_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1) memh(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrih_GP_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1) memh(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrih_GP_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1.new) memh(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STrih_GP_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1.new) memh(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let isPredicable = 1, neverHasSideEffects = 1 in +def STriw_GP_V4 : STInst2<(outs), + (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src), + "memw(#$global+$offset) = $src", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STriw_GP_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, 
u16Imm:$offset, + IntRegs:$src2), + "if ($src1) memw(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STriw_GP_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1) memw(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STriw_GP_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1.new) memw(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +let neverHasSideEffects = 1, isPredicated = 1 in +def STriw_GP_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1.new) memw(##$global+$offset) = $src2", + []>, + Requires<[HasV4T]>; + +// memd(#global)=Rtt +let isPredicable = 1, neverHasSideEffects = 1 in +def STd_GP_V4 : STInst2<(outs), + (ins globaladdress:$global, DoubleRegs:$src), + "memd(#$global) = $src", + []>, + Requires<[HasV4T]>; + +// if (Pv) memd(##global) = Rtt +let neverHasSideEffects = 1, isPredicated = 1 in +def STd_GP_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2), + "if ($src1) memd(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memd(##global) = Rtt +let neverHasSideEffects = 1, isPredicated = 1 in +def STd_GP_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2), + "if (!$src1) memd(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (Pv) memd(##global) = Rtt +let neverHasSideEffects = 1, isPredicated = 1 in +def STd_GP_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2), + "if ($src1.new) memd(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memd(##global) = Rtt +let neverHasSideEffects = 1, isPredicated = 1 in +def STd_GP_cdnNotPt_V4 : STInst2<(outs), + (ins 
PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2), + "if (!$src1.new) memd(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// memb(#global)=Rt +let isPredicable = 1, neverHasSideEffects = 1 in +def STb_GP_V4 : STInst2<(outs), + (ins globaladdress:$global, IntRegs:$src), + "memb(#$global) = $src", + []>, + Requires<[HasV4T]>; + +// if (Pv) memb(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STb_GP_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1) memb(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memb(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STb_GP_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1) memb(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (Pv) memb(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STb_GP_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1.new) memb(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memb(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STb_GP_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1.new) memb(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// memh(#global)=Rt +let isPredicable = 1, neverHasSideEffects = 1 in +def STh_GP_V4 : STInst2<(outs), + (ins globaladdress:$global, IntRegs:$src), + "memh(#$global) = $src", + []>, + Requires<[HasV4T]>; + +// if (Pv) memh(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STh_GP_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1) memh(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memh(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STh_GP_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, 
IntRegs:$src2), + "if (!$src1) memh(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (Pv) memh(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STh_GP_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1.new) memh(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memh(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STh_GP_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1.new) memh(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// memw(#global)=Rt +let isPredicable = 1, neverHasSideEffects = 1 in +def STw_GP_V4 : STInst2<(outs), + (ins globaladdress:$global, IntRegs:$src), + "memw(#$global) = $src", + []>, + Requires<[HasV4T]>; + +// if (Pv) memw(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STw_GP_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1) memw(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memw(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STw_GP_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1) memw(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (Pv) memw(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STw_GP_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1.new) memw(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memw(##global) = Rt +let neverHasSideEffects = 1, isPredicated = 1 in +def STw_GP_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1.new) memw(##$global) = $src2", + []>, + Requires<[HasV4T]>; + +// 64 bit atomic store +def : Pat <(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global), + (i64 DoubleRegs:$src1)), + (STd_GP_V4 
tglobaladdr:$global, (i64 DoubleRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress) -> memd(#foo) +let AddedComplexity = 100 in +def : Pat <(store (i64 DoubleRegs:$src1), + (HexagonCONST32_GP tglobaladdr:$global)), + (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>, + Requires<[HasV4T]>; + +// 8 bit atomic store +def : Pat < (atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress) -> memb(#foo) +let AddedComplexity = 100 in +def : Pat<(truncstorei8 (i32 IntRegs:$src1), + (HexagonCONST32_GP tglobaladdr:$global)), + (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from "i1 = constant<-1>; memw(CONST32(#foo)) = i1" +// to "r0 = 1; memw(#foo) = r0" +let AddedComplexity = 100 in +def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)), + (STb_GP_V4 tglobaladdr:$global, (TFRI 1))>, + Requires<[HasV4T]>; + +def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress) -> memh(#foo) +let AddedComplexity = 100 in +def : Pat<(truncstorei16 (i32 IntRegs:$src1), + (HexagonCONST32_GP tglobaladdr:$global)), + (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +// 32 bit atomic store +def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global), + (i32 IntRegs:$src1)), + (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress) -> memw(#foo) +let AddedComplexity = 100 in +def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)), + (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i64 
DoubleRegs:$src1)), + (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, + (i64 DoubleRegs:$src1))>, + Requires<[HasV4T]>; + +def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset), + (i32 IntRegs:$src1)), + (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress + x) -> memd(#foo + x) +let AddedComplexity = 100 in +def : Pat<(store (i64 DoubleRegs:$src1), + (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, + (i64 DoubleRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress + x) -> memb(#foo + x) +let AddedComplexity = 100 in +def : Pat<(truncstorei8 (i32 IntRegs:$src1), + (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress + x) -> memh(#foo + x) +let AddedComplexity = 100 in +def : Pat<(truncstorei16 (i32 IntRegs:$src1), + (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, + (i32 IntRegs:$src1))>, + Requires<[HasV4T]>; + +// Map from store(globaladdress + x) -> memw(#foo + x) +let AddedComplexity = 100 in +def : Pat<(store (i32 IntRegs:$src1), + (add (HexagonCONST32_GP tglobaladdr:$global), + u16ImmPred:$offset)), + (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, + (i32 
IntRegs:$src1))>, + Requires<[HasV4T]>; + + + //===----------------------------------------------------------------------=== // ST - //===----------------------------------------------------------------------=== @@ -1696,11 +3017,19 @@ def STrib_GP_nv_V4 : NVInst_V4<(outs), []>, Requires<[HasV4T]>; +// memb(#global)=Nt.new +let mayStore = 1, neverHasSideEffects = 1 in +def STb_GP_nv_V4 : NVInst_V4<(outs), + (ins globaladdress:$global, IntRegs:$src), + "memb(#$global) = $src.new", + []>, + Requires<[HasV4T]>; // Store new-value byte conditionally. // if ([!]Pv[.new]) memb(#u6)=Nt.new // if (Pv) memb(Rs+#u6:0)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrib_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memb($addr) = $src2.new", @@ -1708,7 +3037,8 @@ def STrib_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memb(Rs+#u6:0)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrib_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1.new) memb($addr) = $src2.new", @@ -1716,7 +3046,8 @@ def STrib_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memb(Rs+#u6:0)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrib_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memb($addr) = $src2.new", @@ -1724,7 +3055,8 @@ def STrib_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memb(Rs+#u6:0)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1.new) memb($addr) = $src2.new", @@ -1732,7 +3064,8 @@ def 
STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv) memb(Rs+#u6:0)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrib_indexed_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if ($src1) memb($src2+#$src3) = $src4.new", @@ -1740,7 +3073,8 @@ def STrib_indexed_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memb(Rs+#u6:0)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrib_indexed_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if ($src1.new) memb($src2+#$src3) = $src4.new", @@ -1748,7 +3082,8 @@ def STrib_indexed_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memb(Rs+#u6:0)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrib_indexed_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if (!$src1) memb($src2+#$src3) = $src4.new", @@ -1756,7 +3091,8 @@ def STrib_indexed_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memb(Rs+#u6:0)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrib_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4), "if (!$src1.new) memb($src2+#$src3) = $src4.new", @@ -1766,7 +3102,8 @@ def STrib_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs), // if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Nt.new // if (Pv) memb(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STrib_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -1775,7 +3112,8 @@ 
def STrib_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memb(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STrib_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -1784,7 +3122,8 @@ def STrib_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memb(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STrib_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -1793,7 +3132,8 @@ def STrib_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memb(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STrib_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -1803,7 +3143,8 @@ def STrib_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs), // if ([!]Pv[.new]) memb(Rx++#s4:0)=Nt.new // if (Pv) memb(Rx++#s4:0)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset), "if ($src1) memb($src3++#$offset) = $src2.new", @@ -1811,7 +3152,8 @@ def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) memb(Rx++#s4:0)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_STbri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset), "if ($src1.new) memb($src3++#$offset) = $src2.new", @@ -1819,7 +3161,8 @@ def POST_STbri_cdnPt_nv_V4 : 
NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv) memb(Rx++#s4:0)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_STbri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset), "if (!$src1) memb($src3++#$offset) = $src2.new", @@ -1827,7 +3170,8 @@ def POST_STbri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) memb(Rx++#s4:0)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_STbri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset), "if (!$src1.new) memb($src3++#$offset) = $src2.new", @@ -1889,6 +3233,14 @@ def STrih_GP_nv_V4 : NVInst_V4<(outs), []>, Requires<[HasV4T]>; +// memh(#global)=Nt.new +let mayStore = 1, neverHasSideEffects = 1 in +def STh_GP_nv_V4 : NVInst_V4<(outs), + (ins globaladdress:$global, IntRegs:$src), + "memh(#$global) = $src.new", + []>, + Requires<[HasV4T]>; + // Store new-value halfword conditionally. 
@@ -1896,7 +3248,8 @@ def STrih_GP_nv_V4 : NVInst_V4<(outs), // if ([!]Pv[.new]) memh(Rs+#u6:1)=Nt.new // if (Pv) memh(Rs+#u6:1)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrih_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memh($addr) = $src2.new", @@ -1904,7 +3257,8 @@ def STrih_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memh(Rs+#u6:1)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrih_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1.new) memh($addr) = $src2.new", @@ -1912,7 +3266,8 @@ def STrih_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memh(Rs+#u6:1)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrih_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memh($addr) = $src2.new", @@ -1920,7 +3275,8 @@ def STrih_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memh(Rs+#u6:1)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1.new) memh($addr) = $src2.new", @@ -1928,7 +3284,8 @@ def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv) memh(Rs+#u6:1)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrih_indexed_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if ($src1) memh($src2+#$src3) = $src4.new", @@ -1936,7 +3293,8 @@ def STrih_indexed_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memh(Rs+#u6:1)=Nt.new -let 
mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrih_indexed_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if ($src1.new) memh($src2+#$src3) = $src4.new", @@ -1944,7 +3302,8 @@ def STrih_indexed_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memh(Rs+#u6:1)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrih_indexed_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if (!$src1) memh($src2+#$src3) = $src4.new", @@ -1952,7 +3311,8 @@ def STrih_indexed_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memh(Rs+#u6:1)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STrih_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4), "if (!$src1.new) memh($src2+#$src3) = $src4.new", @@ -1961,7 +3321,8 @@ def STrih_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs), // if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Nt.new // if (Pv) memh(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STrih_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -1970,7 +3331,8 @@ def STrih_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memh(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STrih_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -1979,7 +3341,8 @@ def STrih_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memh(Rs+Ru<<#u2)=Nt.new -let mayStore 
= 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STrih_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -1988,7 +3351,8 @@ def STrih_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memh(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STrih_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -1998,7 +3362,8 @@ def STrih_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs), // if ([!]Pv[]) memh(Rx++#s4:1)=Nt.new // if (Pv) memh(Rx++#s4:1)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset), "if ($src1) memh($src3++#$offset) = $src2.new", @@ -2006,7 +3371,8 @@ def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) memh(Rx++#s4:1)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset), "if ($src1.new) memh($src3++#$offset) = $src2.new", @@ -2014,7 +3380,8 @@ def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv) memh(Rx++#s4:1)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_SThri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset), "if (!$src1) memh($src3++#$offset) = $src2.new", @@ -2022,7 +3389,8 @@ def POST_SThri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) 
memh(Rx++#s4:1)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_SThri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset), "if (!$src1.new) memh($src3++#$offset) = $src2.new", @@ -2085,6 +3453,12 @@ def STriw_GP_nv_V4 : NVInst_V4<(outs), []>, Requires<[HasV4T]>; +let mayStore = 1, neverHasSideEffects = 1 in +def STw_GP_nv_V4 : NVInst_V4<(outs), + (ins globaladdress:$global, IntRegs:$src), + "memw(#$global) = $src.new", + []>, + Requires<[HasV4T]>; // Store new-value word conditionally. @@ -2092,7 +3466,8 @@ def STriw_GP_nv_V4 : NVInst_V4<(outs), // if ([!]Pv[.new]) memw(Rs+#u6:2)=Nt.new // if (Pv) memw(Rs+#u6:2)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STriw_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1) memw($addr) = $src2.new", @@ -2100,7 +3475,8 @@ def STriw_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memw(Rs+#u6:2)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STriw_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if ($src1.new) memw($addr) = $src2.new", @@ -2108,7 +3484,8 @@ def STriw_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memw(Rs+#u6:2)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STriw_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1) memw($addr) = $src2.new", @@ -2116,7 +3493,8 @@ def STriw_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memw(Rs+#u6:2)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STriw_cdnNotPt_nv_V4 : 
NVInst_V4<(outs), (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2), "if (!$src1.new) memw($addr) = $src2.new", @@ -2124,7 +3502,8 @@ def STriw_cdnNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv) memw(Rs+#u6:2)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STriw_indexed_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if ($src1) memw($src2+#$src3) = $src4.new", @@ -2132,7 +3511,8 @@ def STriw_indexed_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memw(Rs+#u6:2)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STriw_indexed_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if ($src1.new) memw($src2+#$src3) = $src4.new", @@ -2140,7 +3520,8 @@ def STriw_indexed_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memw(Rs+#u6:2)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STriw_indexed_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if (!$src1) memw($src2+#$src3) = $src4.new", @@ -2148,7 +3529,8 @@ def STriw_indexed_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memw(Rs+#u6:2)=Nt.new -let mayStore = 1, neverHasSideEffects = 1 in +let mayStore = 1, neverHasSideEffects = 1, + isPredicated = 1 in def STriw_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4), "if (!$src1.new) memw($src2+#$src3) = $src4.new", @@ -2158,7 +3540,8 @@ def STriw_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs), // if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Nt.new // if (Pv) memw(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def 
STriw_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -2167,7 +3550,8 @@ def STriw_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (Pv.new) memw(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STriw_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -2176,7 +3560,8 @@ def STriw_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv) memw(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -2185,7 +3570,8 @@ def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs), Requires<[HasV4T]>; // if (!Pv.new) memw(Rs+Ru<<#u2)=Nt.new -let mayStore = 1, AddedComplexity = 10 in +let mayStore = 1, AddedComplexity = 10, + isPredicated = 1 in def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4, IntRegs:$src5), @@ -2195,7 +3581,8 @@ def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs), // if ([!]Pv[.new]) memw(Rx++#s4:2)=Nt.new // if (Pv) memw(Rx++#s4:2)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset), "if ($src1) memw($src3++#$offset) = $src2.new", @@ -2203,7 +3590,8 @@ def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (Pv.new) memw(Rx++#s4:2)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, 
IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset), "if ($src1.new) memw($src3++#$offset) = $src2.new", @@ -2211,7 +3599,8 @@ def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv) memw(Rx++#s4:2)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset), "if (!$src1) memw($src3++#$offset) = $src2.new", @@ -2219,7 +3608,8 @@ def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; // if (!Pv.new) memw(Rx++#s4:2)=Nt.new -let mayStore = 1, hasCtrlDep = 1 in +let mayStore = 1, hasCtrlDep = 1, + isPredicated = 1 in def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset), "if (!$src1.new) memw($src3++#$offset) = $src2.new", @@ -2227,6 +3617,199 @@ def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst), Requires<[HasV4T]>; + +// if (Pv) memb(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STb_GP_cPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1) memb(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memb(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STb_GP_cNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1) memb(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (Pv) memb(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STb_GP_cdnPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1.new) memb(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memb(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STb_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, 
globaladdress:$global, IntRegs:$src2), + "if (!$src1.new) memb(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (Pv) memh(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STh_GP_cPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1) memh(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memh(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STh_GP_cNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1) memh(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (Pv) memh(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STh_GP_cdnPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1.new) memh(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memh(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STh_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1.new) memh(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (Pv) memw(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STw_GP_cPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1) memw(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memw(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STw_GP_cNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1) memw(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (Pv) memw(##global) = Rt +let mayStore = 1, neverHasSideEffects = 1 in +def STw_GP_cdnPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if ($src1.new) memw(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +// if (!Pv) memw(##global) = Rt +let mayStore = 
1, neverHasSideEffects = 1 in +def STw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2), + "if (!$src1.new) memw(##$global) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STrib_GP_cPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1) memb(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STrib_GP_cNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1) memb(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STrib_GP_cdnPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1.new) memb(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STrib_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1.new) memb(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STrih_GP_cPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1) memh(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STrih_GP_cNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1) memh(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STrih_GP_cdnPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1.new) memh(##$global+$offset) = $src2.new", + []>, + 
Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STrih_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1.new) memh(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STriw_GP_cPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1) memw(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STriw_GP_cNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1) memw(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STriw_GP_cdnPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if ($src1.new) memw(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + +let mayStore = 1, neverHasSideEffects = 1 in +def STriw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset, + IntRegs:$src2), + "if (!$src1.new) memw(##$global+$offset) = $src2.new", + []>, + Requires<[HasV4T]>; + //===----------------------------------------------------------------------===// // NV/ST - //===----------------------------------------------------------------------===// @@ -2253,7 +3836,8 @@ multiclass NVJ_type_basic_reg<string NotStr, string OpcStr, string TakenStr> { Requires<[HasV4T]>; } -multiclass NVJ_type_basic_2ndDotNew<string NotStr, string OpcStr, string TakenStr> { +multiclass NVJ_type_basic_2ndDotNew<string NotStr, string OpcStr, + string TakenStr> { def _ie_nv_V4 : NVInst_V4<(outs), (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset), !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr, @@ -2307,7 +3891,8 @@ multiclass NVJ_type_basic_neg<string NotStr, 
string OpcStr, string TakenStr> { Requires<[HasV4T]>; } -multiclass NVJ_type_basic_tstbit<string NotStr, string OpcStr, string TakenStr> { +multiclass NVJ_type_basic_tstbit<string NotStr, string OpcStr, + string TakenStr> { def _ie_nv_V4 : NVInst_V4<(outs), (ins IntRegs:$src1, u1Imm:$src2, brtarget:$offset), !strconcat("if (", !strconcat(NotStr, !strconcat(OpcStr, @@ -2416,16 +4001,18 @@ let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC] in { def ADDr_ADDri_V4 : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, s6Imm:$src3), "$dst = add($src1, add($src2, #$src3))", - [(set IntRegs:$dst, - (add IntRegs:$src1, (add IntRegs:$src2, s6ImmPred:$src3)))]>, + [(set (i32 IntRegs:$dst), + (add (i32 IntRegs:$src1), (add (i32 IntRegs:$src2), + s6ImmPred:$src3)))]>, Requires<[HasV4T]>; // Rd=add(Rs,sub(#s6,Ru)) def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3), "$dst = add($src1, sub(#$src2, $src3))", - [(set IntRegs:$dst, - (add IntRegs:$src1, (sub s6ImmPred:$src2, IntRegs:$src3)))]>, + [(set (i32 IntRegs:$dst), + (add (i32 IntRegs:$src1), (sub s6ImmPred:$src2, + (i32 IntRegs:$src3))))]>, Requires<[HasV4T]>; // Generates the same instruction as ADDr_SUBri_V4 but matches different @@ -2434,8 +4021,9 @@ def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst), def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3), "$dst = add($src1, sub(#$src2, $src3))", - [(set IntRegs:$dst, - (sub (add IntRegs:$src1, s6ImmPred:$src2), IntRegs:$src3))]>, + [(set (i32 IntRegs:$dst), + (sub (add (i32 IntRegs:$src1), s6ImmPred:$src2), + (i32 IntRegs:$src3)))]>, Requires<[HasV4T]>; @@ -2451,16 +4039,16 @@ def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst), def ANDd_NOTd_V4 : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = and($src1, ~$src2)", - [(set DoubleRegs:$dst, (and DoubleRegs:$src1, - (not DoubleRegs:$src2)))]>, + [(set (i64 DoubleRegs:$dst), (and (i64 
DoubleRegs:$src1), + (not (i64 DoubleRegs:$src2))))]>, Requires<[HasV4T]>; // Rdd=or(Rtt,~Rss) def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = or($src1, ~$src2)", - [(set DoubleRegs:$dst, - (or DoubleRegs:$src1, (not DoubleRegs:$src2)))]>, + [(set (i64 DoubleRegs:$dst), + (or (i64 DoubleRegs:$src1), (not (i64 DoubleRegs:$src2))))]>, Requires<[HasV4T]>; @@ -2469,8 +4057,9 @@ def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst), def XORd_XORdd: MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3), "$dst ^= xor($src2, $src3)", - [(set DoubleRegs:$dst, - (xor DoubleRegs:$src1, (xor DoubleRegs:$src2, DoubleRegs:$src3)))], + [(set (i64 DoubleRegs:$dst), + (xor (i64 DoubleRegs:$src1), (xor (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2480,8 +4069,9 @@ def XORd_XORdd: MInst_acc<(outs DoubleRegs:$dst), def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3), "$dst = or($src1, and($src2, #$src3))", - [(set IntRegs:$dst, - (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))], + [(set (i32 IntRegs:$dst), + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + s10ImmPred:$src3)))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -2490,8 +4080,9 @@ def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst), def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst &= and($src2, $src3)", - [(set IntRegs:$dst, - (and IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2499,8 +4090,9 @@ def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst |= and($src2, $src3)", - [(set IntRegs:$dst, - (or IntRegs:$src1, (and 
IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2508,8 +4100,9 @@ def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst ^= and($src2, $src3)", - [(set IntRegs:$dst, - (xor IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2518,8 +4111,9 @@ def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst &= and($src2, ~$src3)", - [(set IntRegs:$dst, - (and IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))], + [(set (i32 IntRegs:$dst), + (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + (not (i32 IntRegs:$src3)))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2527,8 +4121,9 @@ def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst |= and($src2, ~$src3)", - [(set IntRegs:$dst, - (or IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))], + [(set (i32 IntRegs:$dst), + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + (not (i32 IntRegs:$src3)))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2536,8 +4131,9 @@ def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), def XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst ^= and($src2, ~$src3)", - [(set IntRegs:$dst, - (xor IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))], + [(set (i32 IntRegs:$dst), + (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + (not (i32 IntRegs:$src3)))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2546,8 +4142,9 @@ def 
XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst &= or($src2, $src3)", - [(set IntRegs:$dst, - (and IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (and (i32 IntRegs:$src1), (or (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2555,8 +4152,9 @@ def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst |= or($src2, $src3)", - [(set IntRegs:$dst, - (or IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (or (i32 IntRegs:$src1), (or (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2564,8 +4162,9 @@ def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst ^= or($src2, $src3)", - [(set IntRegs:$dst, - (xor IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (xor (i32 IntRegs:$src1), (or (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2574,8 +4173,9 @@ def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst &= xor($src2, $src3)", - [(set IntRegs:$dst, - (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2583,8 +4183,9 @@ def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst |= xor($src2, $src3)", - [(set IntRegs:$dst, - (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 
IntRegs:$dst), + (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2592,8 +4193,9 @@ def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst ^= xor($src2, $src3)", - [(set IntRegs:$dst, - (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2601,8 +4203,9 @@ def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3), "$dst |= and($src2, #$src3)", - [(set IntRegs:$dst, - (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))], + [(set (i32 IntRegs:$dst), + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + s10ImmPred:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2610,8 +4213,9 @@ def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3), "$dst |= or($src2, #$src3)", - [(set IntRegs:$dst, - (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))], + [(set (i32 IntRegs:$dst), + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), + s10ImmPred:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2663,8 +4267,9 @@ def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst), def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst), (ins u6Imm:$src1, IntRegs:$src2, u6Imm:$src3), "$dst = add(#$src1, mpyi($src2, #$src3))", - [(set IntRegs:$dst, - (add (mul IntRegs:$src2, u6ImmPred:$src3), u6ImmPred:$src1))]>, + [(set (i32 IntRegs:$dst), + (add (mul (i32 IntRegs:$src2), u6ImmPred:$src3), + u6ImmPred:$src1))]>, Requires<[HasV4T]>; // Rd=add(#u6,mpyi(Rs,Rt)) @@ -2672,32 +4277,36 @@ def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst), def ADDi_MPYrr_V4 : MInst<(outs IntRegs:$dst), (ins 
u6Imm:$src1, IntRegs:$src2, IntRegs:$src3), "$dst = add(#$src1, mpyi($src2, $src3))", - [(set IntRegs:$dst, - (add (mul IntRegs:$src2, IntRegs:$src3), u6ImmPred:$src1))]>, + [(set (i32 IntRegs:$dst), + (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)), + u6ImmPred:$src1))]>, Requires<[HasV4T]>; // Rd=add(Ru,mpyi(#u6:2,Rs)) def ADDr_MPYir_V4 : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u6Imm:$src2, IntRegs:$src3), "$dst = add($src1, mpyi(#$src2, $src3))", - [(set IntRegs:$dst, - (add IntRegs:$src1, (mul IntRegs:$src3, u6_2ImmPred:$src2)))]>, + [(set (i32 IntRegs:$dst), + (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src3), + u6_2ImmPred:$src2)))]>, Requires<[HasV4T]>; // Rd=add(Ru,mpyi(Rs,#u6)) def ADDr_MPYri_V4 : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u6Imm:$src3), "$dst = add($src1, mpyi($src2, #$src3))", - [(set IntRegs:$dst, - (add IntRegs:$src1, (mul IntRegs:$src2, u6ImmPred:$src3)))]>, + [(set (i32 IntRegs:$dst), + (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2), + u6ImmPred:$src3)))]>, Requires<[HasV4T]>; // Rx=add(Ru,mpyi(Rx,Rs)) def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst = add($src1, mpyi($src2, $src3))", - [(set IntRegs:$dst, - (add IntRegs:$src1, (mul IntRegs:$src2, IntRegs:$src3)))], + [(set (i32 IntRegs:$dst), + (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2), + (i32 IntRegs:$src3))))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -2745,8 +4354,9 @@ def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst), def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = add(#$src1, asl($src2, #$src3))", - [(set IntRegs:$dst, - (add (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], + [(set (i32 IntRegs:$dst), + (add (shl (i32 IntRegs:$src2), u5ImmPred:$src3), + u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -2754,8 +4364,9 @@ def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), def ADDi_LSRri_V4 : 
MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = add(#$src1, lsr($src2, #$src3))", - [(set IntRegs:$dst, - (add (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], + [(set (i32 IntRegs:$dst), + (add (srl (i32 IntRegs:$src2), u5ImmPred:$src3), + u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -2763,8 +4374,9 @@ def ADDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), def SUBi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = sub(#$src1, asl($src2, #$src3))", - [(set IntRegs:$dst, - (sub (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], + [(set (i32 IntRegs:$dst), + (sub (shl (i32 IntRegs:$src2), u5ImmPred:$src3), + u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -2772,8 +4384,9 @@ def SUBi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = sub(#$src1, lsr($src2, #$src3))", - [(set IntRegs:$dst, - (sub (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], + [(set (i32 IntRegs:$dst), + (sub (srl (i32 IntRegs:$src2), u5ImmPred:$src3), + u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -2783,8 +4396,9 @@ def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = and(#$src1, asl($src2, #$src3))", - [(set IntRegs:$dst, - (and (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], + [(set (i32 IntRegs:$dst), + (and (shl (i32 IntRegs:$src2), u5ImmPred:$src3), + u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -2792,26 +4406,31 @@ def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), def ANDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = and(#$src1, lsr($src2, #$src3))", - [(set IntRegs:$dst, - (and (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], + [(set (i32 IntRegs:$dst), + (and 
(srl (i32 IntRegs:$src2), u5ImmPred:$src3), + u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; //Rx=or(#u8,asl(Rx,#U5)) +let AddedComplexity = 30 in def ORi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = or(#$src1, asl($src2, #$src3))", - [(set IntRegs:$dst, - (or (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], + [(set (i32 IntRegs:$dst), + (or (shl (i32 IntRegs:$src2), u5ImmPred:$src3), + u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; //Rx=or(#u8,lsr(Rx,#U5)) +let AddedComplexity = 30 in def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = or(#$src1, lsr($src2, #$src3))", - [(set IntRegs:$dst, - (or (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], + [(set (i32 IntRegs:$dst), + (or (srl (i32 IntRegs:$src2), u5ImmPred:$src3), + u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -2820,7 +4439,8 @@ def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), //Rd=lsl(#s6,Rt) def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1, IntRegs:$src2), "$dst = lsl(#$src1, $src2)", - [(set IntRegs:$dst, (shl s6ImmPred:$src1, IntRegs:$src2))]>, + [(set (i32 IntRegs:$dst), (shl s6ImmPred:$src1, + (i32 IntRegs:$src2)))]>, Requires<[HasV4T]>; @@ -2829,8 +4449,9 @@ def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1, IntRegs:$src2), def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), "$dst ^= asl($src2, $src3)", - [(set DoubleRegs:$dst, - (xor DoubleRegs:$src1, (shl DoubleRegs:$src2, IntRegs:$src3)))], + [(set (i64 DoubleRegs:$dst), + (xor (i64 DoubleRegs:$src1), (shl (i64 DoubleRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2838,8 +4459,9 @@ def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), "$dst ^= asr($src2, $src3)", - [(set 
DoubleRegs:$dst, - (xor DoubleRegs:$src1, (sra DoubleRegs:$src2, IntRegs:$src3)))], + [(set (i64 DoubleRegs:$dst), + (xor (i64 DoubleRegs:$src1), (sra (i64 DoubleRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2847,8 +4469,9 @@ def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), "$dst ^= lsl($src2, $src3)", - [(set DoubleRegs:$dst, - (xor DoubleRegs:$src1, (shl DoubleRegs:$src2, IntRegs:$src3)))], + [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1), + (shl (i64 DoubleRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2856,8 +4479,9 @@ def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), def LSRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), "$dst ^= lsr($src2, $src3)", - [(set DoubleRegs:$dst, - (xor DoubleRegs:$src1, (srl DoubleRegs:$src2, IntRegs:$src3)))], + [(set (i64 DoubleRegs:$dst), + (xor (i64 DoubleRegs:$src1), (srl (i64 DoubleRegs:$src2), + (i32 IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -2903,16 +4527,16 @@ let AddedComplexity = 30 in def MEMw_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, m6Imm:$addend), "Error; should not emit", - [(store (add (load (add IntRegs:$base, u6_2ImmPred:$offset)), -m6ImmPred:$addend), - (add IntRegs:$base, u6_2ImmPred:$offset))]>, + [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), + m6ImmPred:$addend), + (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memw(Rs+#u6:2) += #U5 let AddedComplexity = 30 in def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$addend), - "memw($base+#$offset) += $addend", + "memw($base+#$offset) += #$addend", []>, Requires<[HasV4T, UseMEMOP]>; @@ -2920,7 +4544,7 @@ def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs), 
let AddedComplexity = 30 in def MEMw_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$subend), - "memw($base+#$offset) -= $subend", + "memw($base+#$offset) -= #$subend", []>, Requires<[HasV4T, UseMEMOP]>; @@ -2929,9 +4553,9 @@ let AddedComplexity = 30 in def MEMw_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$addend), "memw($base+#$offset) += $addend", - [(store (add (load (add IntRegs:$base, u6_2ImmPred:$offset)), -IntRegs:$addend), - (add IntRegs:$base, u6_2ImmPred:$offset))]>, + [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), + (i32 IntRegs:$addend)), + (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memw(Rs+#u6:2) -= Rt @@ -2939,19 +4563,19 @@ let AddedComplexity = 30 in def MEMw_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$subend), "memw($base+#$offset) -= $subend", - [(store (sub (load (add IntRegs:$base, u6_2ImmPred:$offset)), -IntRegs:$subend), - (add IntRegs:$base, u6_2ImmPred:$offset))]>, + [(store (sub (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), + (i32 IntRegs:$subend)), + (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memw(Rs+#u6:2) &= Rt let AddedComplexity = 30 in def MEMw_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$andend), - "memw($base+#$offset) += $andend", - [(store (and (load (add IntRegs:$base, u6_2ImmPred:$offset)), -IntRegs:$andend), - (add IntRegs:$base, u6_2ImmPred:$offset))]>, + "memw($base+#$offset) &= $andend", + [(store (and (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), + (i32 IntRegs:$andend)), + (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memw(Rs+#u6:2) |= Rt @@ -2959,9 +4583,9 @@ let AddedComplexity = 30 in def MEMw_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$orend), 
"memw($base+#$offset) |= $orend", - [(store (or (load (add IntRegs:$base, u6_2ImmPred:$offset)), - IntRegs:$orend), - (add IntRegs:$base, u6_2ImmPred:$offset))]>, + [(store (or (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), + (i32 IntRegs:$orend)), + (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // MEMw_ADDSUBi_V4: @@ -2996,7 +4620,7 @@ let AddedComplexity = 30 in def MEMw_ADDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$addend), "memw($addr) += $addend", - [(store (add (load ADDRriU6_2:$addr), IntRegs:$addend), + [(store (add (load ADDRriU6_2:$addr), (i32 IntRegs:$addend)), ADDRriU6_2:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -3005,7 +4629,7 @@ let AddedComplexity = 30 in def MEMw_SUBr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$subend), "memw($addr) -= $subend", - [(store (sub (load ADDRriU6_2:$addr), IntRegs:$subend), + [(store (sub (load ADDRriU6_2:$addr), (i32 IntRegs:$subend)), ADDRriU6_2:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -3014,7 +4638,7 @@ let AddedComplexity = 30 in def MEMw_ANDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$andend), "memw($addr) &= $andend", - [(store (and (load ADDRriU6_2:$addr), IntRegs:$andend), + [(store (and (load ADDRriU6_2:$addr), (i32 IntRegs:$andend)), ADDRriU6_2:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -3023,8 +4647,8 @@ let AddedComplexity = 30 in def MEMw_ORr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$orend), "memw($addr) |= $orend", - [(store (or (load ADDRriU6_2:$addr), IntRegs:$orend), -ADDRriU6_2:$addr)]>, + [(store (or (load ADDRriU6_2:$addr), (i32 IntRegs:$orend)), + ADDRriU6_2:$addr)]>, Requires<[HasV4T, UseMEMOP]>; //===----------------------------------------------------------------------===// @@ -3060,10 +4684,10 @@ let AddedComplexity = 30 in def MEMh_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, m6Imm:$addend), "Error; should not emit", - [(truncstorei16 (add (sextloadi16 (add 
IntRegs:$base, + [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base), u6_1ImmPred:$offset)), m6ImmPred:$addend), - (add IntRegs:$base, u6_1ImmPred:$offset))]>, + (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) += #U5 @@ -3087,10 +4711,10 @@ let AddedComplexity = 30 in def MEMh_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$addend), "memh($base+#$offset) += $addend", - [(truncstorei16 (add (sextloadi16 (add IntRegs:$base, + [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base), u6_1ImmPred:$offset)), - IntRegs:$addend), - (add IntRegs:$base, u6_1ImmPred:$offset))]>, + (i32 IntRegs:$addend)), + (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) -= Rt @@ -3098,10 +4722,10 @@ let AddedComplexity = 30 in def MEMh_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$subend), "memh($base+#$offset) -= $subend", - [(truncstorei16 (sub (sextloadi16 (add IntRegs:$base, + [(truncstorei16 (sub (sextloadi16 (add (i32 IntRegs:$base), u6_1ImmPred:$offset)), - IntRegs:$subend), - (add IntRegs:$base, u6_1ImmPred:$offset))]>, + (i32 IntRegs:$subend)), + (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) &= Rt @@ -3109,10 +4733,10 @@ let AddedComplexity = 30 in def MEMh_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$andend), "memh($base+#$offset) += $andend", - [(truncstorei16 (and (sextloadi16 (add IntRegs:$base, + [(truncstorei16 (and (sextloadi16 (add (i32 IntRegs:$base), u6_1ImmPred:$offset)), - IntRegs:$andend), - (add IntRegs:$base, u6_1ImmPred:$offset))]>, + (i32 IntRegs:$andend)), + (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) |= Rt @@ -3120,10 +4744,10 @@ let AddedComplexity = 30 in def MEMh_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, 
u6_1Imm:$offset, IntRegs:$orend), "memh($base+#$offset) |= $orend", - [(truncstorei16 (or (sextloadi16 (add IntRegs:$base, + [(truncstorei16 (or (sextloadi16 (add (i32 IntRegs:$base), u6_1ImmPred:$offset)), - IntRegs:$orend), - (add IntRegs:$base, u6_1ImmPred:$offset))]>, + (i32 IntRegs:$orend)), + (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // MEMh_ADDSUBi_V4: @@ -3159,7 +4783,7 @@ def MEMh_ADDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$addend), "memh($addr) += $addend", [(truncstorei16 (add (sextloadi16 ADDRriU6_1:$addr), - IntRegs:$addend), ADDRriU6_1:$addr)]>, + (i32 IntRegs:$addend)), ADDRriU6_1:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) -= Rt @@ -3168,7 +4792,7 @@ def MEMh_SUBr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$subend), "memh($addr) -= $subend", [(truncstorei16 (sub (sextloadi16 ADDRriU6_1:$addr), - IntRegs:$subend), ADDRriU6_1:$addr)]>, + (i32 IntRegs:$subend)), ADDRriU6_1:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) &= Rt @@ -3177,7 +4801,7 @@ def MEMh_ANDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$andend), "memh($addr) &= $andend", [(truncstorei16 (and (sextloadi16 ADDRriU6_1:$addr), - IntRegs:$andend), ADDRriU6_1:$addr)]>, + (i32 IntRegs:$andend)), ADDRriU6_1:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) |= Rt @@ -3186,7 +4810,7 @@ def MEMh_ORr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$orend), "memh($addr) |= $orend", [(truncstorei16 (or (sextloadi16 ADDRriU6_1:$addr), - IntRegs:$orend), ADDRriU6_1:$addr)]>, + (i32 IntRegs:$orend)), ADDRriU6_1:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -3223,10 +4847,10 @@ let AddedComplexity = 30 in def MEMb_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, m6Imm:$addend), "Error; should not emit", - [(truncstorei8 (add (sextloadi8 (add IntRegs:$base, + [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base), u6_0ImmPred:$offset)), m6ImmPred:$addend), - 
(add IntRegs:$base, u6_0ImmPred:$offset))]>, + (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) += #U5 @@ -3250,10 +4874,10 @@ let AddedComplexity = 30 in def MEMb_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$addend), "memb($base+#$offset) += $addend", - [(truncstorei8 (add (sextloadi8 (add IntRegs:$base, + [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base), u6_0ImmPred:$offset)), - IntRegs:$addend), - (add IntRegs:$base, u6_0ImmPred:$offset))]>, + (i32 IntRegs:$addend)), + (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) -= Rt @@ -3261,10 +4885,10 @@ let AddedComplexity = 30 in def MEMb_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$subend), "memb($base+#$offset) -= $subend", - [(truncstorei8 (sub (sextloadi8 (add IntRegs:$base, + [(truncstorei8 (sub (sextloadi8 (add (i32 IntRegs:$base), u6_0ImmPred:$offset)), - IntRegs:$subend), - (add IntRegs:$base, u6_0ImmPred:$offset))]>, + (i32 IntRegs:$subend)), + (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) &= Rt @@ -3272,10 +4896,10 @@ let AddedComplexity = 30 in def MEMb_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$andend), "memb($base+#$offset) += $andend", - [(truncstorei8 (and (sextloadi8 (add IntRegs:$base, + [(truncstorei8 (and (sextloadi8 (add (i32 IntRegs:$base), u6_0ImmPred:$offset)), - IntRegs:$andend), - (add IntRegs:$base, u6_0ImmPred:$offset))]>, + (i32 IntRegs:$andend)), + (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) |= Rt @@ -3283,10 +4907,10 @@ let AddedComplexity = 30 in def MEMb_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$orend), "memb($base+#$offset) |= $orend", - [(truncstorei8 (or (sextloadi8 (add IntRegs:$base, + [(truncstorei8 
(or (sextloadi8 (add (i32 IntRegs:$base), u6_0ImmPred:$offset)), - IntRegs:$orend), - (add IntRegs:$base, u6_0ImmPred:$offset))]>, + (i32 IntRegs:$orend)), + (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // MEMb_ADDSUBi_V4: @@ -3322,7 +4946,7 @@ def MEMb_ADDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$addend), "memb($addr) += $addend", [(truncstorei8 (add (sextloadi8 ADDRriU6_0:$addr), - IntRegs:$addend), ADDRriU6_0:$addr)]>, + (i32 IntRegs:$addend)), ADDRriU6_0:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) -= Rt @@ -3331,7 +4955,7 @@ def MEMb_SUBr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$subend), "memb($addr) -= $subend", [(truncstorei8 (sub (sextloadi8 ADDRriU6_0:$addr), - IntRegs:$subend), ADDRriU6_0:$addr)]>, + (i32 IntRegs:$subend)), ADDRriU6_0:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) &= Rt @@ -3340,7 +4964,7 @@ def MEMb_ANDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$andend), "memb($addr) &= $andend", [(truncstorei8 (and (sextloadi8 ADDRriU6_0:$addr), - IntRegs:$andend), ADDRriU6_0:$addr)]>, + (i32 IntRegs:$andend)), ADDRriU6_0:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) |= Rt @@ -3349,7 +4973,7 @@ def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$orend), "memb($addr) |= $orend", [(truncstorei8 (or (sextloadi8 ADDRriU6_0:$addr), - IntRegs:$orend), ADDRriU6_0:$addr)]>, + (i32 IntRegs:$orend)), ADDRriU6_0:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -3364,13 +4988,16 @@ def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs), // The implemented patterns are: EQ/GT/GTU. // Missing patterns are: GE/GEU/LT/LTU/LE/LEU. +// Following instruction is not being extended as it results into the +// incorrect code for negative numbers. 
// Pd=cmpb.eq(Rs,#u8) + let isCompare = 1 in def CMPbEQri_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2), "$dst = cmpb.eq($src1, #$src2)", - [(set PredRegs:$dst, (seteq (and IntRegs:$src1, 255), - u8ImmPred:$src2))]>, + [(set (i1 PredRegs:$dst), + (seteq (and (i32 IntRegs:$src1), 255), u8ImmPred:$src2))]>, Requires<[HasV4T]>; // Pd=cmpb.eq(Rs,Rt) @@ -3378,10 +5005,9 @@ let isCompare = 1 in def CMPbEQrr_ubub_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmpb.eq($src1, $src2)", - [(set PredRegs:$dst, (seteq (and (xor IntRegs:$src1, - IntRegs:$src2), - 255), - 0))]>, + [(set (i1 PredRegs:$dst), + (seteq (and (xor (i32 IntRegs:$src1), + (i32 IntRegs:$src2)), 255), 0))]>, Requires<[HasV4T]>; // Pd=cmpb.eq(Rs,Rt) @@ -3389,17 +5015,9 @@ let isCompare = 1 in def CMPbEQrr_sbsb_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmpb.eq($src1, $src2)", - [(set PredRegs:$dst, (seteq (shl IntRegs:$src1, (i32 24)), - (shl IntRegs:$src2, (i32 24))))]>, - Requires<[HasV4T]>; - -// Pd=cmpb.gt(Rs,#s8) -let isCompare = 1 in -def CMPbGTri_V4 : MInst<(outs PredRegs:$dst), - (ins IntRegs:$src1, s32Imm:$src2), - "$dst = cmpb.gt($src1, #$src2)", - [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 24)), - s32_24ImmPred:$src2))]>, + [(set (i1 PredRegs:$dst), + (seteq (shl (i32 IntRegs:$src1), (i32 24)), + (shl (i32 IntRegs:$src2), (i32 24))))]>, Requires<[HasV4T]>; // Pd=cmpb.gt(Rs,Rt) @@ -3407,8 +5025,9 @@ let isCompare = 1 in def CMPbGTrr_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmpb.gt($src1, $src2)", - [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 24)), - (shl IntRegs:$src2, (i32 24))))]>, + [(set (i1 PredRegs:$dst), + (setgt (shl (i32 IntRegs:$src1), (i32 24)), + (shl (i32 IntRegs:$src2), (i32 24))))]>, Requires<[HasV4T]>; // Pd=cmpb.gtu(Rs,#u7) @@ -3416,8 +5035,8 @@ let isCompare = 1 in def CMPbGTUri_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, 
u7Imm:$src2), "$dst = cmpb.gtu($src1, #$src2)", - [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 255), - u7ImmPred:$src2))]>, + [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255), + u7ImmPred:$src2))]>, Requires<[HasV4T]>; // Pd=cmpb.gtu(Rs,Rt) @@ -3425,18 +5044,21 @@ let isCompare = 1 in def CMPbGTUrr_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmpb.gtu($src1, $src2)", - [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 255), - (and IntRegs:$src2, 255)))]>, + [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255), + (and (i32 IntRegs:$src2), 255)))]>, Requires<[HasV4T]>; +// Following instruction is not being extended as it results into the incorrect +// code for negative numbers. + // Signed half compare(.eq) ri. // Pd=cmph.eq(Rs,#s8) let isCompare = 1 in def CMPhEQri_V4 : MInst<(outs PredRegs:$dst), - (ins IntRegs:$src1, u16Imm:$src2), + (ins IntRegs:$src1, s8Imm:$src2), "$dst = cmph.eq($src1, #$src2)", - [(set PredRegs:$dst, (seteq (and IntRegs:$src1, 65535), - u16_s8ImmPred:$src2))]>, + [(set (i1 PredRegs:$dst), (seteq (and (i32 IntRegs:$src1), 65535), + s8ImmPred:$src2))]>, Requires<[HasV4T]>; // Signed half compare(.eq) rr. @@ -3449,10 +5071,9 @@ let isCompare = 1 in def CMPhEQrr_xor_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmph.eq($src1, $src2)", - [(set PredRegs:$dst, (seteq (and (xor IntRegs:$src1, - IntRegs:$src2), - 65535), - 0))]>, + [(set (i1 PredRegs:$dst), (seteq (and (xor (i32 IntRegs:$src1), + (i32 IntRegs:$src2)), + 65535), 0))]>, Requires<[HasV4T]>; // Signed half compare(.eq) rr. 
@@ -3465,19 +5086,25 @@ let isCompare = 1 in def CMPhEQrr_shl_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmph.eq($src1, $src2)", - [(set PredRegs:$dst, (seteq (shl IntRegs:$src1, (i32 16)), - (shl IntRegs:$src2, (i32 16))))]>, + [(set (i1 PredRegs:$dst), + (seteq (shl (i32 IntRegs:$src1), (i32 16)), + (shl (i32 IntRegs:$src2), (i32 16))))]>, Requires<[HasV4T]>; +/* Incorrect Pattern -- immediate should be right shifted before being +used in the cmph.gt instruction. // Signed half compare(.gt) ri. // Pd=cmph.gt(Rs,#s8) + let isCompare = 1 in def CMPhGTri_V4 : MInst<(outs PredRegs:$dst), - (ins IntRegs:$src1, s32Imm:$src2), + (ins IntRegs:$src1, s8Imm:$src2), "$dst = cmph.gt($src1, #$src2)", - [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 16)), - s32_16s8ImmPred:$src2))]>, + [(set (i1 PredRegs:$dst), + (setgt (shl (i32 IntRegs:$src1), (i32 16)), + s8ImmPred:$src2))]>, Requires<[HasV4T]>; +*/ // Signed half compare(.gt) rr. // Pd=cmph.gt(Rs,Rt) @@ -3485,8 +5112,9 @@ let isCompare = 1 in def CMPhGTrr_shl_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmph.gt($src1, $src2)", - [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 16)), - (shl IntRegs:$src2, (i32 16))))]>, + [(set (i1 PredRegs:$dst), + (setgt (shl (i32 IntRegs:$src1), (i32 16)), + (shl (i32 IntRegs:$src2), (i32 16))))]>, Requires<[HasV4T]>; // Unsigned half compare rr (.gtu). @@ -3495,8 +5123,9 @@ let isCompare = 1 in def CMPhGTUrr_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmph.gtu($src1, $src2)", - [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 65535), - (and IntRegs:$src2, 65535)))]>, + [(set (i1 PredRegs:$dst), + (setugt (and (i32 IntRegs:$src1), 65535), + (and (i32 IntRegs:$src2), 65535)))]>, Requires<[HasV4T]>; // Unsigned half compare ri (.gtu). 
@@ -3505,8 +5134,8 @@ let isCompare = 1 in def CMPhGTUri_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u7Imm:$src2), "$dst = cmph.gtu($src1, #$src2)", - [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 65535), - u7ImmPred:$src2))]>, + [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 65535), + u7ImmPred:$src2))]>, Requires<[HasV4T]>; //===----------------------------------------------------------------------===// @@ -3523,10 +5152,42 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1, Requires<[HasV4T]>; } +// Restore registers and dealloc return function call. +let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1, + Defs = [R29, R30, R31, PC] in { + def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs), + (ins calltarget:$dst), + "jump $dst // Restore_and_dealloc_return", + []>, + Requires<[HasV4T]>; +} + +// Restore registers and dealloc frame before a tail call. +let isCall = 1, isBarrier = 1, + Defs = [R29, R30, R31, PC] in { + def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs), + (ins calltarget:$dst), + "call $dst // Restore_and_dealloc_before_tailcall", + []>, + Requires<[HasV4T]>; +} + +// Save registers function call. 
+let isCall = 1, isBarrier = 1, + Uses = [R29, R31] in { + def SAVE_REGISTERS_CALL_V4 : JInst<(outs), + (ins calltarget:$dst), + "call $dst // Save_calle_saved_registers", + []>, + Requires<[HasV4T]>; +} + // if (Ps) dealloc_return let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { - def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, + isPredicated = 1 in { + def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs), + (ins PredRegs:$src1, i32imm:$amt1), "if ($src1) dealloc_return", []>, Requires<[HasV4T]>; @@ -3534,7 +5195,8 @@ let isReturn = 1, isTerminator = 1, // if (!Ps) dealloc_return let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, + isPredicated = 1 in { def DEALLOC_RET_cNotPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if (!$src1) dealloc_return", @@ -3544,7 +5206,8 @@ let isReturn = 1, isTerminator = 1, // if (Ps.new) dealloc_return:nt let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, + isPredicated = 1 in { def DEALLOC_RET_cdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if ($src1.new) dealloc_return:nt", @@ -3554,7 +5217,8 @@ let isReturn = 1, isTerminator = 1, // if (!Ps.new) dealloc_return:nt let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, + isPredicated = 1 in { def DEALLOC_RET_cNotdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if (!$src1.new) dealloc_return:nt", @@ -3564,7 +5228,8 @@ let isReturn = 1, isTerminator = 1, // if (Ps.new) 
dealloc_return:t let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, + isPredicated = 1 in { def DEALLOC_RET_cdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if ($src1.new) dealloc_return:t", @@ -3574,10 +5239,539 @@ let isReturn = 1, isTerminator = 1, // if (!Ps.new) dealloc_return:nt let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, + isPredicated = 1 in { def DEALLOC_RET_cNotdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if (!$src1.new) dealloc_return:t", []>, Requires<[HasV4T]>; } + + +// Load/Store with absolute addressing mode +// memw(#u6)=Rt + +multiclass ST_abs<string OpcStr> { + let isPredicable = 1 in + def _abs_V4 : STInst2<(outs), + (ins globaladdress:$absaddr, IntRegs:$src), + !strconcat(OpcStr, "(##$absaddr) = $src"), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), + !strconcat("if ($src1)", + !strconcat(OpcStr, "(##$absaddr) = $src2")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), + !strconcat("if (!$src1)", + !strconcat(OpcStr, "(##$absaddr) = $src2")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), + !strconcat("if ($src1.new)", + !strconcat(OpcStr, "(##$absaddr) = $src2")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), + !strconcat("if (!$src1.new)", + !strconcat(OpcStr, "(##$absaddr) = $src2")), + []>, 
+ Requires<[HasV4T]>; + + def _abs_nv_V4 : STInst2<(outs), + (ins globaladdress:$absaddr, IntRegs:$src), + !strconcat(OpcStr, "(##$absaddr) = $src.new"), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cPt_nv_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), + !strconcat("if ($src1)", + !strconcat(OpcStr, "(##$absaddr) = $src2.new")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cNotPt_nv_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), + !strconcat("if (!$src1)", + !strconcat(OpcStr, "(##$absaddr) = $src2.new")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnPt_nv_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), + !strconcat("if ($src1.new)", + !strconcat(OpcStr, "(##$absaddr) = $src2.new")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnNotPt_nv_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), + !strconcat("if (!$src1.new)", + !strconcat(OpcStr, "(##$absaddr) = $src2.new")), + []>, + Requires<[HasV4T]>; +} + +let AddedComplexity = 30, isPredicable = 1 in +def STrid_abs_V4 : STInst<(outs), + (ins globaladdress:$absaddr, DoubleRegs:$src), + "memd(##$absaddr) = $src", + [(store (i64 DoubleRegs:$src), + (HexagonCONST32 tglobaladdr:$absaddr))]>, + Requires<[HasV4T]>; + +let AddedComplexity = 30, isPredicated = 1 in +def STrid_abs_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2), + "if ($src1) memd(##$absaddr) = $src2", + []>, + Requires<[HasV4T]>; + +let AddedComplexity = 30, isPredicated = 1 in +def STrid_abs_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2), + "if (!$src1) memd(##$absaddr) = $src2", + []>, + Requires<[HasV4T]>; + +let AddedComplexity = 30, isPredicated = 1 in +def STrid_abs_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, 
globaladdress:$absaddr, DoubleRegs:$src2), + "if ($src1.new) memd(##$absaddr) = $src2", + []>, + Requires<[HasV4T]>; + +let AddedComplexity = 30, isPredicated = 1 in +def STrid_abs_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2), + "if (!$src1.new) memd(##$absaddr) = $src2", + []>, + Requires<[HasV4T]>; + +defm STrib : ST_abs<"memb">; +defm STrih : ST_abs<"memh">; +defm STriw : ST_abs<"memw">; + +let Predicates = [HasV4T], AddedComplexity = 30 in +def : Pat<(truncstorei8 (i32 IntRegs:$src1), + (HexagonCONST32 tglobaladdr:$absaddr)), + (STrib_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>; + +let Predicates = [HasV4T], AddedComplexity = 30 in +def : Pat<(truncstorei16 (i32 IntRegs:$src1), + (HexagonCONST32 tglobaladdr:$absaddr)), + (STrih_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>; + +let Predicates = [HasV4T], AddedComplexity = 30 in +def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)), + (STriw_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>; + + +multiclass LD_abs<string OpcStr> { + let isPredicable = 1 in + def _abs_V4 : LDInst2<(outs IntRegs:$dst), + (ins globaladdress:$absaddr), + !strconcat("$dst = ", !strconcat(OpcStr, "(##$absaddr)")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$absaddr), + !strconcat("if ($src1) $dst = ", + !strconcat(OpcStr, "(##$absaddr)")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$absaddr), + !strconcat("if (!$src1) $dst = ", + !strconcat(OpcStr, "(##$absaddr)")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$absaddr), + !strconcat("if ($src1.new) $dst = ", + !strconcat(OpcStr, "(##$absaddr)")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def 
_abs_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$absaddr), + !strconcat("if (!$src1.new) $dst = ", + !strconcat(OpcStr, "(##$absaddr)")), + []>, + Requires<[HasV4T]>; +} + +let AddedComplexity = 30 in +def LDrid_abs_V4 : LDInst<(outs DoubleRegs:$dst), + (ins globaladdress:$absaddr), + "$dst = memd(##$absaddr)", + [(set (i64 DoubleRegs:$dst), + (load (HexagonCONST32 tglobaladdr:$absaddr)))]>, + Requires<[HasV4T]>; + +let AddedComplexity = 30, isPredicated = 1 in +def LDrid_abs_cPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$absaddr), + "if ($src1) $dst = memd(##$absaddr)", + []>, + Requires<[HasV4T]>; + +let AddedComplexity = 30, isPredicated = 1 in +def LDrid_abs_cNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$absaddr), + "if (!$src1) $dst = memd(##$absaddr)", + []>, + Requires<[HasV4T]>; + +let AddedComplexity = 30, isPredicated = 1 in +def LDrid_abs_cdnPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$absaddr), + "if ($src1.new) $dst = memd(##$absaddr)", + []>, + Requires<[HasV4T]>; + +let AddedComplexity = 30, isPredicated = 1 in +def LDrid_abs_cdnNotPt_V4 : LDInst2<(outs DoubleRegs:$dst), + (ins PredRegs:$src1, globaladdress:$absaddr), + "if (!$src1.new) $dst = memd(##$absaddr)", + []>, + Requires<[HasV4T]>; + +defm LDrib : LD_abs<"memb">; +defm LDriub : LD_abs<"memub">; +defm LDrih : LD_abs<"memh">; +defm LDriuh : LD_abs<"memuh">; +defm LDriw : LD_abs<"memw">; + + +let Predicates = [HasV4T], AddedComplexity = 30 in +def : Pat<(i32 (load (HexagonCONST32 tglobaladdr:$absaddr))), + (LDriw_abs_V4 tglobaladdr: $absaddr)>; + +let Predicates = [HasV4T], AddedComplexity=30 in +def : Pat<(i32 (sextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))), + (LDrib_abs_V4 tglobaladdr:$absaddr)>; + +let Predicates = [HasV4T], AddedComplexity=30 in +def : Pat<(i32 (zextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))), + (LDriub_abs_V4 
tglobaladdr:$absaddr)>; + +let Predicates = [HasV4T], AddedComplexity=30 in +def : Pat<(i32 (sextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))), + (LDrih_abs_V4 tglobaladdr:$absaddr)>; + +let Predicates = [HasV4T], AddedComplexity=30 in +def : Pat<(i32 (zextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))), + (LDriuh_abs_V4 tglobaladdr:$absaddr)>; + +// Transfer global address into a register +let AddedComplexity=50, isMoveImm = 1, isReMaterializable = 1 in +def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$src1), + "$dst = ##$src1", + [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>, + Requires<[HasV4T]>; + +let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in +def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$src2), + "if($src1) $dst = ##$src2", + []>, + Requires<[HasV4T]>; + +let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in +def TFRI_cNotPt_V4 : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$src2), + "if(!$src1) $dst = ##$src2", + []>, + Requires<[HasV4T]>; + +let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in +def TFRI_cdnPt_V4 : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$src2), + "if($src1.new) $dst = ##$src2", + []>, + Requires<[HasV4T]>; + +let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in +def TFRI_cdnNotPt_V4 : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, globaladdress:$src2), + "if(!$src1.new) $dst = ##$src2", + []>, + Requires<[HasV4T]>; + +let AddedComplexity = 50, Predicates = [HasV4T] in +def : Pat<(HexagonCONST32_GP tglobaladdr:$src1), + (TFRI_V4 tglobaladdr:$src1)>; + + +// Load - Indirect with long offset: These instructions take global address +// as an operand +let AddedComplexity = 10 in +def LDrid_ind_lo_V4 : LDInst<(outs DoubleRegs:$dst), + (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset), + "$dst=memd($src1<<#$src2+##$offset)", + [(set (i64 
DoubleRegs:$dst), + (load (add (shl IntRegs:$src1, u2ImmPred:$src2), + (HexagonCONST32 tglobaladdr:$offset))))]>, + Requires<[HasV4T]>; + +let AddedComplexity = 10 in +multiclass LD_indirect_lo<string OpcStr, PatFrag OpNode> { + def _lo_V4 : LDInst<(outs IntRegs:$dst), + (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset), + !strconcat("$dst = ", + !strconcat(OpcStr, "($src1<<#$src2+##$offset)")), + [(set IntRegs:$dst, + (i32 (OpNode (add (shl IntRegs:$src1, u2ImmPred:$src2), + (HexagonCONST32 tglobaladdr:$offset)))))]>, + Requires<[HasV4T]>; +} + +defm LDrib_ind : LD_indirect_lo<"memb", sextloadi8>; +defm LDriub_ind : LD_indirect_lo<"memub", zextloadi8>; +defm LDrih_ind : LD_indirect_lo<"memh", sextloadi16>; +defm LDriuh_ind : LD_indirect_lo<"memuh", zextloadi16>; +defm LDriw_ind : LD_indirect_lo<"memw", load>; + +// Store - Indirect with long offset: These instructions take global address +// as an operand +let AddedComplexity = 10 in +def STrid_ind_lo_V4 : STInst<(outs), + (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3, + DoubleRegs:$src4), + "memd($src1<<#$src2+#$src3) = $src4", + [(store (i64 DoubleRegs:$src4), + (add (shl IntRegs:$src1, u2ImmPred:$src2), + (HexagonCONST32 tglobaladdr:$src3)))]>, + Requires<[HasV4T]>; + +let AddedComplexity = 10 in +multiclass ST_indirect_lo<string OpcStr, PatFrag OpNode> { + def _lo_V4 : STInst<(outs), + (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3, + IntRegs:$src4), + !strconcat(OpcStr, "($src1<<#$src2+##$src3) = $src4"), + [(OpNode (i32 IntRegs:$src4), + (add (shl IntRegs:$src1, u2ImmPred:$src2), + (HexagonCONST32 tglobaladdr:$src3)))]>, + Requires<[HasV4T]>; +} + +defm STrib_ind : ST_indirect_lo<"memb", truncstorei8>; +defm STrih_ind : ST_indirect_lo<"memh", truncstorei16>; +defm STriw_ind : ST_indirect_lo<"memw", store>; + +// Store - absolute addressing mode: These instruction take constant +// value as the extended operand +multiclass ST_absimm<string OpcStr> { + let isPredicable = 1 in + def _abs_V4 : 
STInst2<(outs), + (ins u6Imm:$src1, IntRegs:$src2), + !strconcat(OpcStr, "(#$src1) = $src2"), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), + !strconcat("if ($src1)", !strconcat(OpcStr, "(#$src2) = $src3")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), + !strconcat("if (!$src1)", !strconcat(OpcStr, "(#$src2) = $src3")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), + !strconcat("if ($src1.new)", + !strconcat(OpcStr, "(#$src2) = $src3")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnNotPt_V4 : STInst2<(outs), + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), + !strconcat("if (!$src1.new)", + !strconcat(OpcStr, "(#$src2) = $src3")), + []>, + Requires<[HasV4T]>; + + def _abs_nv_V4 : STInst2<(outs), + (ins u6Imm:$src1, IntRegs:$src2), + !strconcat(OpcStr, "(#$src1) = $src2.new"), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cPt_nv_V4 : STInst2<(outs), + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), + !strconcat("if ($src1)", + !strconcat(OpcStr, "(#$src2) = $src3.new")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cNotPt_nv_V4 : STInst2<(outs), + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), + !strconcat("if (!$src1)", + !strconcat(OpcStr, "(#$src2) = $src3.new")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnPt_nv_V4 : STInst2<(outs), + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), + !strconcat("if ($src1.new)", + !strconcat(OpcStr, "(#$src2) = $src3.new")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnNotPt_nv_V4 : STInst2<(outs), + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), + !strconcat("if (!$src1.new)", + !strconcat(OpcStr, 
"(#$src2) = $src3.new")), + []>, + Requires<[HasV4T]>; +} + +defm STrib_imm : ST_absimm<"memb">; +defm STrih_imm : ST_absimm<"memh">; +defm STriw_imm : ST_absimm<"memw">; + +let Predicates = [HasV4T], AddedComplexity = 30 in +def : Pat<(truncstorei8 (i32 IntRegs:$src1), u6ImmPred:$src2), + (STrib_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>; + +let Predicates = [HasV4T], AddedComplexity = 30 in +def : Pat<(truncstorei16 (i32 IntRegs:$src1), u6ImmPred:$src2), + (STrih_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>; + +let Predicates = [HasV4T], AddedComplexity = 30 in +def : Pat<(store (i32 IntRegs:$src1), u6ImmPred:$src2), + (STriw_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>; + + +// Load - absolute addressing mode: These instruction take constant +// value as the extended operand + +multiclass LD_absimm<string OpcStr> { + let isPredicable = 1 in + def _abs_V4 : LDInst2<(outs IntRegs:$dst), + (ins u6Imm:$src), + !strconcat("$dst = ", + !strconcat(OpcStr, "(#$src)")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, u6Imm:$src2), + !strconcat("if ($src1) $dst = ", + !strconcat(OpcStr, "(#$src2)")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, u6Imm:$src2), + !strconcat("if (!$src1) $dst = ", + !strconcat(OpcStr, "(#$src2)")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, u6Imm:$src2), + !strconcat("if ($src1.new) $dst = ", + !strconcat(OpcStr, "(#$src2)")), + []>, + Requires<[HasV4T]>; + + let isPredicated = 1 in + def _abs_cdnNotPt_V4 : LDInst2<(outs IntRegs:$dst), + (ins PredRegs:$src1, u6Imm:$src2), + !strconcat("if (!$src1.new) $dst = ", + !strconcat(OpcStr, "(#$src2)")), + []>, + Requires<[HasV4T]>; +} + +defm LDrib_imm : LD_absimm<"memb">; +defm LDriub_imm : LD_absimm<"memub">; +defm LDrih_imm : LD_absimm<"memh">; 
+defm LDriuh_imm : LD_absimm<"memuh">; +defm LDriw_imm : LD_absimm<"memw">; + +let Predicates = [HasV4T], AddedComplexity = 30 in +def : Pat<(i32 (load u6ImmPred:$src)), + (LDriw_imm_abs_V4 u6ImmPred:$src)>; + +let Predicates = [HasV4T], AddedComplexity=30 in +def : Pat<(i32 (sextloadi8 u6ImmPred:$src)), + (LDrib_imm_abs_V4 u6ImmPred:$src)>; + +let Predicates = [HasV4T], AddedComplexity=30 in +def : Pat<(i32 (zextloadi8 u6ImmPred:$src)), + (LDriub_imm_abs_V4 u6ImmPred:$src)>; + +let Predicates = [HasV4T], AddedComplexity=30 in +def : Pat<(i32 (sextloadi16 u6ImmPred:$src)), + (LDrih_imm_abs_V4 u6ImmPred:$src)>; + +let Predicates = [HasV4T], AddedComplexity=30 in +def : Pat<(i32 (zextloadi16 u6ImmPred:$src)), + (LDriuh_imm_abs_V4 u6ImmPred:$src)>; + + +// Indexed store double word - global address. +// memw(Rs+#u6:2)=#S8 +let AddedComplexity = 10 in +def STriw_offset_ext_V4 : STInst<(outs), + (ins IntRegs:$src1, u6_2Imm:$src2, globaladdress:$src3), + "memw($src1+#$src2) = ##$src3", + [(store (HexagonCONST32 tglobaladdr:$src3), + (add IntRegs:$src1, u6_2ImmPred:$src2))]>, + Requires<[HasV4T]>; + + +// Indexed store double word - global address. 
+// memw(Rs+#u6:2)=#S8 +let AddedComplexity = 10 in +def STrih_offset_ext_V4 : STInst<(outs), + (ins IntRegs:$src1, u6_1Imm:$src2, globaladdress:$src3), + "memh($src1+#$src2) = ##$src3", + [(truncstorei16 (HexagonCONST32 tglobaladdr:$src3), + (add IntRegs:$src1, u6_1ImmPred:$src2))]>, + Requires<[HasV4T]>; diff --git a/lib/Target/Hexagon/HexagonInstrInfoV5.td b/lib/Target/Hexagon/HexagonInstrInfoV5.td new file mode 100644 index 0000000..92d098c --- /dev/null +++ b/lib/Target/Hexagon/HexagonInstrInfoV5.td @@ -0,0 +1,626 @@ +def SDTHexagonFCONST32 : SDTypeProfile<1, 1, [ + SDTCisVT<0, f32>, + SDTCisPtrTy<1>]>; +def HexagonFCONST32 : SDNode<"HexagonISD::FCONST32", SDTHexagonFCONST32>; + +let isReMaterializable = 1, isMoveImm = 1 in +def FCONST32_nsdata : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst = CONST32(#$global)", + [(set (f32 IntRegs:$dst), + (HexagonFCONST32 tglobaladdr:$global))]>, + Requires<[HasV5T]>; + +let isReMaterializable = 1, isMoveImm = 1 in +def CONST64_Float_Real : LDInst<(outs DoubleRegs:$dst), (ins f64imm:$src1), + "$dst = CONST64(#$src1)", + [(set DoubleRegs:$dst, fpimm:$src1)]>, + Requires<[HasV5T]>; + +let isReMaterializable = 1, isMoveImm = 1 in +def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1), + "$dst = CONST32(#$src1)", + [(set IntRegs:$dst, fpimm:$src1)]>, + Requires<[HasV5T]>; + +// Transfer immediate float. +// Only works with single precision fp value. +// For double precision, use CONST64_float_real, as 64bit transfer +// can only hold 40-bit values - 32 from const ext + 8 bit immediate. 
+let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in +def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32imm:$src1), + "$dst = ##$src1", + [(set IntRegs:$dst, fpimm:$src1)]>, + Requires<[HasV5T]>; + +def TFRI_cPt_f : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, f32imm:$src2), + "if ($src1) $dst = ##$src2", + []>, + Requires<[HasV5T]>; + +let isPredicated = 1 in +def TFRI_cNotPt_f : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, f32imm:$src2), + "if (!$src1) $dst = ##$src2", + []>, + Requires<[HasV5T]>; + +// Convert single precision to double precision and vice-versa. +def CONVERT_sf2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2df($src)", + [(set DoubleRegs:$dst, (fextend IntRegs:$src))]>, + Requires<[HasV5T]>; + +def CONVERT_df2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2sf($src)", + [(set IntRegs:$dst, (fround DoubleRegs:$src))]>, + Requires<[HasV5T]>; + + +// Load. +def LDrid_f : LDInst<(outs DoubleRegs:$dst), + (ins MEMri:$addr), + "$dst = memd($addr)", + [(set DoubleRegs:$dst, (f64 (load ADDRriS11_3:$addr)))]>, + Requires<[HasV5T]>; + + +let AddedComplexity = 20 in +def LDrid_indexed_f : LDInst<(outs DoubleRegs:$dst), + (ins IntRegs:$src1, s11_3Imm:$offset), + "$dst = memd($src1+#$offset)", + [(set DoubleRegs:$dst, (f64 (load (add IntRegs:$src1, + s11_3ImmPred:$offset))))]>, + Requires<[HasV5T]>; + +def LDriw_f : LDInst<(outs IntRegs:$dst), + (ins MEMri:$addr), "$dst = memw($addr)", + [(set IntRegs:$dst, (f32 (load ADDRriS11_2:$addr)))]>, + Requires<[HasV5T]>; + + +let AddedComplexity = 20 in +def LDriw_indexed_f : LDInst<(outs IntRegs:$dst), + (ins IntRegs:$src1, s11_2Imm:$offset), + "$dst = memw($src1+#$offset)", + [(set IntRegs:$dst, (f32 (load (add IntRegs:$src1, + s11_2ImmPred:$offset))))]>, + Requires<[HasV5T]>; + +// Store. 
+def STriw_f : STInst<(outs), + (ins MEMri:$addr, IntRegs:$src1), + "memw($addr) = $src1", + [(store (f32 IntRegs:$src1), ADDRriS11_2:$addr)]>, + Requires<[HasV5T]>; + +let AddedComplexity = 10 in +def STriw_indexed_f : STInst<(outs), + (ins IntRegs:$src1, s11_2Imm:$src2, IntRegs:$src3), + "memw($src1+#$src2) = $src3", + [(store (f32 IntRegs:$src3), + (add IntRegs:$src1, s11_2ImmPred:$src2))]>, + Requires<[HasV5T]>; + +def STrid_f : STInst<(outs), + (ins MEMri:$addr, DoubleRegs:$src1), + "memd($addr) = $src1", + [(store (f64 DoubleRegs:$src1), ADDRriS11_2:$addr)]>, + Requires<[HasV5T]>; + +// Indexed store double word. +let AddedComplexity = 10 in +def STrid_indexed_f : STInst<(outs), + (ins IntRegs:$src1, s11_3Imm:$src2, DoubleRegs:$src3), + "memd($src1+#$src2) = $src3", + [(store (f64 DoubleRegs:$src3), + (add IntRegs:$src1, s11_3ImmPred:$src2))]>, + Requires<[HasV5T]>; + + +// Add +let isCommutable = 1 in +def fADD_rr : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfadd($src1, $src2)", + [(set IntRegs:$dst, (fadd IntRegs:$src1, IntRegs:$src2))]>, + Requires<[HasV5T]>; + +let isCommutable = 1 in +def fADD64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = dfadd($src1, $src2)", + [(set DoubleRegs:$dst, (fadd DoubleRegs:$src1, + DoubleRegs:$src2))]>, + Requires<[HasV5T]>; + +def fSUB_rr : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfsub($src1, $src2)", + [(set IntRegs:$dst, (fsub IntRegs:$src1, IntRegs:$src2))]>, + Requires<[HasV5T]>; + +def fSUB64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = dfsub($src1, $src2)", + [(set DoubleRegs:$dst, (fsub DoubleRegs:$src1, + DoubleRegs:$src2))]>, + Requires<[HasV5T]>; + +let isCommutable = 1 in +def fMUL_rr : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfmpy($src1, $src2)", + [(set IntRegs:$dst, (fmul IntRegs:$src1, IntRegs:$src2))]>, + 
Requires<[HasV5T]>; + +let isCommutable = 1 in +def fMUL64_rr : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, + DoubleRegs:$src2), + "$dst = dfmpy($src1, $src2)", + [(set DoubleRegs:$dst, (fmul DoubleRegs:$src1, + DoubleRegs:$src2))]>, + Requires<[HasV5T]>; + +// Compare. +let isCompare = 1 in { +multiclass FCMP64_rr<string OpcStr, PatFrag OpNode> { + def _rr : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$b, DoubleRegs:$c), + !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), + [(set PredRegs:$dst, + (OpNode (f64 DoubleRegs:$b), (f64 DoubleRegs:$c)))]>, + Requires<[HasV5T]>; +} + +multiclass FCMP32_rr<string OpcStr, PatFrag OpNode> { + def _rr : ALU64_rr<(outs PredRegs:$dst), (ins IntRegs:$b, IntRegs:$c), + !strconcat("$dst = ", !strconcat(OpcStr, "($b, $c)")), + [(set PredRegs:$dst, + (OpNode (f32 IntRegs:$b), (f32 IntRegs:$c)))]>, + Requires<[HasV5T]>; +} +} + +defm FCMPOEQ64 : FCMP64_rr<"dfcmp.eq", setoeq>; +defm FCMPUEQ64 : FCMP64_rr<"dfcmp.eq", setueq>; +defm FCMPOGT64 : FCMP64_rr<"dfcmp.gt", setogt>; +defm FCMPUGT64 : FCMP64_rr<"dfcmp.gt", setugt>; +defm FCMPOGE64 : FCMP64_rr<"dfcmp.ge", setoge>; +defm FCMPUGE64 : FCMP64_rr<"dfcmp.ge", setuge>; + +defm FCMPOEQ32 : FCMP32_rr<"sfcmp.eq", setoeq>; +defm FCMPUEQ32 : FCMP32_rr<"sfcmp.eq", setueq>; +defm FCMPOGT32 : FCMP32_rr<"sfcmp.gt", setogt>; +defm FCMPUGT32 : FCMP32_rr<"sfcmp.gt", setugt>; +defm FCMPOGE32 : FCMP32_rr<"sfcmp.ge", setoge>; +defm FCMPUGE32 : FCMP32_rr<"sfcmp.ge", setuge>; + +// olt. 
+def : Pat <(i1 (setolt (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (FCMPOGT32_rr IntRegs:$src2, IntRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setolt (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPOGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (FCMPOGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPOGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)), + (f64 DoubleRegs:$src1)))>, + Requires<[HasV5T]>; + +// gt. +def : Pat <(i1 (setugt (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGT64_rr (f64 DoubleRegs:$src1), + (f64 (CONST64_Float_Real fpimm:$src2))))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setugt (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGT32_rr (f32 IntRegs:$src1), (f32 (TFRI_f fpimm:$src2))))>, + Requires<[HasV5T]>; + +// ult. +def : Pat <(i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setult (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGT32_rr (f32 (TFRI_f fpimm:$src2)), (f32 IntRegs:$src1)))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat <(i1 (setult (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)), + (f64 DoubleRegs:$src1)))>, + Requires<[HasV5T]>; + +// le. +// rs <= rt -> rt >= rs. +def : Pat<(i1 (setole (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (FCMPOGE32_rr IntRegs:$src2, IntRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setole (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPOGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>, + Requires<[HasV5T]>; + + +// Rss <= Rtt -> Rtt >= Rss. 
+def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (FCMPOGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setole (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPOGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)), + DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +// rs <= rt -> rt >= rs. +def : Pat<(i1 (setule (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (FCMPUGE32_rr IntRegs:$src2, IntRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setule (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGE32_rr (f32 (TFRI_f fpimm:$src2)), IntRegs:$src1))>, + Requires<[HasV5T]>; + +// Rss <= Rtt -> Rtt >= Rss. +def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (FCMPUGE64_rr DoubleRegs:$src2, DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setule (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (FCMPUGE64_rr (f64 (CONST64_Float_Real fpimm:$src2)), + DoubleRegs:$src1))>, + Requires<[HasV5T]>; + +// ne. 
+def : Pat<(i1 (setone (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, IntRegs:$src2)))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setune (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, IntRegs:$src2)))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1, DoubleRegs:$src2)))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setone (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (NOT_p (FCMPOEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setone (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (NOT_p (FCMPOEQ64_rr DoubleRegs:$src1, + (f64 (CONST64_Float_Real fpimm:$src2)))))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setune (f32 IntRegs:$src1), (fpimm:$src2))), + (i1 (NOT_p (FCMPUEQ32_rr IntRegs:$src1, (f32 (TFRI_f fpimm:$src2)))))>, + Requires<[HasV5T]>; + +def : Pat<(i1 (setune (f64 DoubleRegs:$src1), (fpimm:$src2))), + (i1 (NOT_p (FCMPUEQ64_rr DoubleRegs:$src1, + (f64 (CONST64_Float_Real fpimm:$src2)))))>, + Requires<[HasV5T]>; + +// Convert Integer to Floating Point. 
+def CONVERT_d2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_d2sf($src)", + [(set (f32 IntRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_ud2sf : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_ud2sf($src)", + [(set (f32 IntRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_uw2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_uw2sf($src)", + [(set (f32 IntRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_w2sf : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_w2sf($src)", + [(set (f32 IntRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_d2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_d2df($src)", + [(set (f64 DoubleRegs:$dst), (sint_to_fp (i64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_ud2df : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_ud2df($src)", + [(set (f64 DoubleRegs:$dst), (uint_to_fp (i64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_uw2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_uw2df($src)", + [(set (f64 DoubleRegs:$dst), (uint_to_fp (i32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_w2df : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_w2df($src)", + [(set (f64 DoubleRegs:$dst), (sint_to_fp (i32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +// Convert Floating Point to Integer - default. 
+def CONVERT_df2uw : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2uw($src):chop", + [(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_df2w : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2w($src):chop", + [(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_sf2uw : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2uw($src):chop", + [(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_sf2w : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2w($src):chop", + [(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_df2d : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2d($src):chop", + [(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_df2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2ud($src):chop", + [(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_sf2d : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2d($src):chop", + [(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +def CONVERT_sf2ud : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2ud($src):chop", + [(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>, + Requires<[HasV5T]>; + +// Convert Floating Point to Integer: non-chopped. 
+let AddedComplexity = 20 in +def CONVERT_df2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2uw($src)", + [(set (i32 IntRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_df2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2w($src)", + [(set (i32 IntRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_sf2uw_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2uw($src)", + [(set (i32 IntRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_sf2w_nchop : ALU64_rr<(outs IntRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2w($src)", + [(set (i32 IntRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_df2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2d($src)", + [(set (i64 DoubleRegs:$dst), (fp_to_sint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_df2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src), + "$dst = convert_df2ud($src)", + [(set (i64 DoubleRegs:$dst), (fp_to_uint (f64 DoubleRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_sf2d_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2d($src)", + [(set (i64 DoubleRegs:$dst), (fp_to_sint (f32 IntRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + +let AddedComplexity = 20 in +def CONVERT_sf2ud_nchop : ALU64_rr<(outs DoubleRegs:$dst), (ins IntRegs:$src), + "$dst = convert_sf2ud($src)", + [(set (i64 DoubleRegs:$dst), (fp_to_uint (f32 IntRegs:$src)))]>, + Requires<[HasV5T, IEEERndNearV5T]>; + + + +// Bitcast is 
different than [fp|sint|uint]_to_[sint|uint|fp]. +def : Pat <(i32 (bitconvert (f32 IntRegs:$src))), + (i32 (TFR IntRegs:$src))>, + Requires<[HasV5T]>; + +def : Pat <(f32 (bitconvert (i32 IntRegs:$src))), + (f32 (TFR IntRegs:$src))>, + Requires<[HasV5T]>; + +def : Pat <(i64 (bitconvert (f64 DoubleRegs:$src))), + (i64 (TFR64 DoubleRegs:$src))>, + Requires<[HasV5T]>; + +def : Pat <(f64 (bitconvert (i64 DoubleRegs:$src))), + (f64 (TFR64 DoubleRegs:$src))>, + Requires<[HasV5T]>; + +// Floating point fused multiply-add. +def FMADD_dp : ALU64_acc<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3), + "$dst += dfmpy($src2, $src3)", + [(set (f64 DoubleRegs:$dst), + (fma DoubleRegs:$src2, DoubleRegs:$src3, DoubleRegs:$src1))], + "$src1 = $dst">, + Requires<[HasV5T]>; + +def FMADD_sp : ALU64_acc<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), + "$dst += sfmpy($src2, $src3)", + [(set (f32 IntRegs:$dst), + (fma IntRegs:$src2, IntRegs:$src3, IntRegs:$src1))], + "$src1 = $dst">, + Requires<[HasV5T]>; + + +// Floating point max/min. 
+let AddedComplexity = 100 in +def FMAX_dp : ALU64_rr<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2), + "$dst = dfmax($src1, $src2)", + [(set DoubleRegs:$dst, (f64 (select (i1 (setolt DoubleRegs:$src2, + DoubleRegs:$src1)), + DoubleRegs:$src1, + DoubleRegs:$src2)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100 in +def FMAX_sp : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfmax($src1, $src2)", + [(set IntRegs:$dst, (f32 (select (i1 (setolt IntRegs:$src2, + IntRegs:$src1)), + IntRegs:$src1, + IntRegs:$src2)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100 in +def FMIN_dp : ALU64_rr<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, DoubleRegs:$src2), + "$dst = dfmin($src1, $src2)", + [(set DoubleRegs:$dst, (f64 (select (i1 (setogt DoubleRegs:$src2, + DoubleRegs:$src1)), + DoubleRegs:$src1, + DoubleRegs:$src2)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100 in +def FMIN_sp : ALU64_rr<(outs IntRegs:$dst), + (ins IntRegs:$src1, IntRegs:$src2), + "$dst = sfmin($src1, $src2)", + [(set IntRegs:$dst, (f32 (select (i1 (setogt IntRegs:$src2, + IntRegs:$src1)), + IntRegs:$src1, + IntRegs:$src2)))]>, + Requires<[HasV5T]>; + +// Pseudo instruction to encode a set of conditional transfers. +// This instruction is used instead of a mux and trades-off codesize +// for performance. We conduct this transformation optimistically in +// the hope that these instructions get promoted to dot-new transfers. 
+let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_rr_f : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, + IntRegs:$src2, + IntRegs:$src3), + "Error; should not emit", + [(set IntRegs:$dst, (f32 (select PredRegs:$src1, + IntRegs:$src2, + IntRegs:$src3)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_rr64_f : ALU32_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, + DoubleRegs:$src2, + DoubleRegs:$src3), + "Error; should not emit", + [(set DoubleRegs:$dst, (f64 (select PredRegs:$src1, + DoubleRegs:$src2, + DoubleRegs:$src3)))]>, + Requires<[HasV5T]>; + + + +let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_ri_f : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$src1, IntRegs:$src2, f32imm:$src3), + "Error; should not emit", + [(set IntRegs:$dst, + (f32 (select PredRegs:$src1, IntRegs:$src2, fpimm:$src3)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_ir_f : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$src1, f32imm:$src2, IntRegs:$src3), + "Error; should not emit", + [(set IntRegs:$dst, + (f32 (select PredRegs:$src1, fpimm:$src2, IntRegs:$src3)))]>, + Requires<[HasV5T]>; + +let AddedComplexity = 100, isPredicated = 1 in +def TFR_condset_ii_f : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$src1, f32imm:$src2, f32imm:$src3), + "Error; should not emit", + [(set IntRegs:$dst, (f32 (select PredRegs:$src1, + fpimm:$src2, + fpimm:$src3)))]>, + Requires<[HasV5T]>; + + +def : Pat <(select (i1 (setult (f32 IntRegs:$src1), (f32 IntRegs:$src2))), + (f32 IntRegs:$src3), + (f32 IntRegs:$src4)), + (TFR_condset_rr_f (FCMPUGT32_rr IntRegs:$src2, IntRegs:$src1), IntRegs:$src4, + IntRegs:$src3)>, Requires<[HasV5T]>; + +def : Pat <(select (i1 (setult (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))), + (f64 DoubleRegs:$src3), + (f64 DoubleRegs:$src4)), + (TFR_condset_rr64_f (FCMPUGT64_rr DoubleRegs:$src2, DoubleRegs:$src1), + DoubleRegs:$src4, DoubleRegs:$src3)>, 
Requires<[HasV5T]>; + +// Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i). +def : Pat <(select (not PredRegs:$src1), fpimm:$src2, fpimm:$src3), + (TFR_condset_ii_f PredRegs:$src1, fpimm:$src3, fpimm:$src2)>; + +// Map from p0 = pnot(p0); r0 = select(p0, #i, r1) +// => r0 = TFR_condset_ri(p0, r1, #i) +def : Pat <(select (not PredRegs:$src1), fpimm:$src2, IntRegs:$src3), + (TFR_condset_ri_f PredRegs:$src1, IntRegs:$src3, fpimm:$src2)>; + +// Map from p0 = pnot(p0); r0 = mux(p0, r1, #i) +// => r0 = TFR_condset_ir(p0, #i, r1) +def : Pat <(select (not PredRegs:$src1), IntRegs:$src2, fpimm:$src3), + (TFR_condset_ir_f PredRegs:$src1, fpimm:$src3, IntRegs:$src2)>; + +def : Pat <(i32 (fp_to_sint (f64 DoubleRegs:$src1))), + (i32 (EXTRACT_SUBREG (i64 (CONVERT_df2d (f64 DoubleRegs:$src1))), subreg_loreg))>, + Requires<[HasV5T]>; + +def : Pat <(fabs (f32 IntRegs:$src1)), + (CLRBIT_31 (f32 IntRegs:$src1), 31)>, + Requires<[HasV5T]>; + +def : Pat <(fneg (f32 IntRegs:$src1)), + (TOGBIT_31 (f32 IntRegs:$src1), 31)>, + Requires<[HasV5T]>; + +/* +def : Pat <(fabs (f64 DoubleRegs:$src1)), + (CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>, + Requires<[HasV5T]>; + +def : Pat <(fabs (f64 DoubleRegs:$src1)), + (CLRBIT_31 (f32 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), 31)>, + Requires<[HasV5T]>; + */ diff --git a/lib/Target/Hexagon/HexagonIntrinsics.td b/lib/Target/Hexagon/HexagonIntrinsics.td index b15e293..99f59d5 100644 --- a/lib/Target/Hexagon/HexagonIntrinsics.td +++ b/lib/Target/Hexagon/HexagonIntrinsics.td @@ -551,13 +551,6 @@ class di_SInst_diu6u6<string opc, Intrinsic IntID> [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2, imm:$src3))]>; -class di_SInst_didisi<string opc, Intrinsic IntID> - : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, - IntRegs:$src3), - !strconcat("$dst = ", !strconcat(opc , "($src1, $src2, $src3)")), - [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2, - 
IntRegs:$src3))]>; - class di_SInst_didiqi<string opc, Intrinsic IntID> : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), @@ -818,6 +811,11 @@ class di_MInst_s8s8<string opc, Intrinsic IntID> !strconcat("$dst = ", !strconcat(opc , "(#$src1, #$src2)")), [(set DoubleRegs:$dst, (IntID imm:$src1, imm:$src2))]>; +class si_MInst_sis9<string opc, Intrinsic IntID> + : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>; + class si_MInst_sisi<string opc, Intrinsic IntID> : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), @@ -952,6 +950,17 @@ class si_SInst_sisi_sat<string opc, Intrinsic IntID> !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")), [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>; +class si_SInst_didi_sat<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2):sat")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>; + +class si_SInst_disi_s1_rnd_sat<string opc, Intrinsic IntID> + : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, IntRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , + "($src1, $src2):<<1:rnd:sat")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1, IntRegs:$src2))]>; + class si_MInst_sisi_s1_rnd_sat<string opc, Intrinsic IntID> : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), !strconcat("$dst = ", !strconcat(opc , @@ -1612,6 +1621,18 @@ class di_MInst_dididi_acc_rnd_sat<string opc, Intrinsic IntID> DoubleRegs:$src2))], "$dst2 = $dst">; +class di_MInst_dididi_acc_s1<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, + DoubleRegs:$src1, + DoubleRegs:$src2), + !strconcat("$dst += ", + !strconcat(opc , "($src1, 
$src2):<<1")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, + DoubleRegs:$src1, + DoubleRegs:$src2))], + "$dst2 = $dst">; + + class di_MInst_dididi_acc_s1_sat<string opc, Intrinsic IntID> : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1, @@ -1822,53 +1843,63 @@ class si_MInst_didi<string opc, Intrinsic IntID> !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), [(set IntRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>; +// +// LDInst classes. +// +let mayLoad = 1, neverHasSideEffects = 1 in +class di_LDInstPI_diu4<string opc, Intrinsic IntID> + : LDInstPI<(outs IntRegs:$dst, DoubleRegs:$dst2), + (ins IntRegs:$src1, IntRegs:$src2, CRRegs:$src3, s4Imm:$offset), + "$dst2 = memd($src1++#$offset:circ($src3))", + [], + "$src1 = $dst">; /******************************************************************** * ALU32/ALU * *********************************************************************/ // ALU32 / ALU / Add. -def Hexagon_A2_add: +def HEXAGON_A2_add: si_ALU32_sisi <"add", int_hexagon_A2_add>; -def Hexagon_A2_addi: +def HEXAGON_A2_addi: si_ALU32_sis16 <"add", int_hexagon_A2_addi>; // ALU32 / ALU / Logical operations. -def Hexagon_A2_and: +def HEXAGON_A2_and: si_ALU32_sisi <"and", int_hexagon_A2_and>; -def Hexagon_A2_andir: +def HEXAGON_A2_andir: si_ALU32_sis10 <"and", int_hexagon_A2_andir>; -def Hexagon_A2_not: +def HEXAGON_A2_not: si_ALU32_si <"not", int_hexagon_A2_not>; -def Hexagon_A2_or: +def HEXAGON_A2_or: si_ALU32_sisi <"or", int_hexagon_A2_or>; -def Hexagon_A2_orir: +def HEXAGON_A2_orir: si_ALU32_sis10 <"or", int_hexagon_A2_orir>; -def Hexagon_A2_xor: +def HEXAGON_A2_xor: si_ALU32_sisi <"xor", int_hexagon_A2_xor>; // ALU32 / ALU / Negate. -def Hexagon_A2_neg: +def HEXAGON_A2_neg: si_ALU32_si <"neg", int_hexagon_A2_neg>; // ALU32 / ALU / Subtract. 
-def Hexagon_A2_sub: +def HEXAGON_A2_sub: si_ALU32_sisi <"sub", int_hexagon_A2_sub>; -def Hexagon_A2_subri: +def HEXAGON_A2_subri: si_ALU32_s10si <"sub", int_hexagon_A2_subri>; // ALU32 / ALU / Transfer Immediate. -def Hexagon_A2_tfril: +def HEXAGON_A2_tfril: si_lo_ALU32_siu16 <"", int_hexagon_A2_tfril>; -def Hexagon_A2_tfrih: +def HEXAGON_A2_tfrih: si_hi_ALU32_siu16 <"", int_hexagon_A2_tfrih>; -def Hexagon_A2_tfrsi: +def HEXAGON_A2_tfrsi: si_ALU32_s16 <"", int_hexagon_A2_tfrsi>; -def Hexagon_A2_tfrpi: +def HEXAGON_A2_tfrpi: di_ALU32_s8 <"", int_hexagon_A2_tfrpi>; // ALU32 / ALU / Transfer Register. -def Hexagon_A2_tfr: +def HEXAGON_A2_tfr: si_ALU32_si_tfr <"", int_hexagon_A2_tfr>; /******************************************************************** @@ -1876,45 +1907,45 @@ def Hexagon_A2_tfr: *********************************************************************/ // ALU32 / PERM / Combine. -def Hexagon_A2_combinew: +def HEXAGON_A2_combinew: di_ALU32_sisi <"combine", int_hexagon_A2_combinew>; -def Hexagon_A2_combine_hh: +def HEXAGON_A2_combine_hh: si_MInst_sisi_hh <"combine", int_hexagon_A2_combine_hh>; -def Hexagon_A2_combine_lh: +def HEXAGON_A2_combine_lh: si_MInst_sisi_lh <"combine", int_hexagon_A2_combine_lh>; -def Hexagon_A2_combine_hl: +def HEXAGON_A2_combine_hl: si_MInst_sisi_hl <"combine", int_hexagon_A2_combine_hl>; -def Hexagon_A2_combine_ll: +def HEXAGON_A2_combine_ll: si_MInst_sisi_ll <"combine", int_hexagon_A2_combine_ll>; -def Hexagon_A2_combineii: +def HEXAGON_A2_combineii: di_MInst_s8s8 <"combine", int_hexagon_A2_combineii>; // ALU32 / PERM / Mux. -def Hexagon_C2_mux: +def HEXAGON_C2_mux: si_ALU32_qisisi <"mux", int_hexagon_C2_mux>; -def Hexagon_C2_muxri: +def HEXAGON_C2_muxri: si_ALU32_qis8si <"mux", int_hexagon_C2_muxri>; -def Hexagon_C2_muxir: +def HEXAGON_C2_muxir: si_ALU32_qisis8 <"mux", int_hexagon_C2_muxir>; -def Hexagon_C2_muxii: +def HEXAGON_C2_muxii: si_ALU32_qis8s8 <"mux", int_hexagon_C2_muxii>; // ALU32 / PERM / Shift halfword. 
-def Hexagon_A2_aslh: +def HEXAGON_A2_aslh: si_ALU32_si <"aslh", int_hexagon_A2_aslh>; -def Hexagon_A2_asrh: +def HEXAGON_A2_asrh: si_ALU32_si <"asrh", int_hexagon_A2_asrh>; def SI_to_SXTHI_asrh: si_ALU32_si <"asrh", int_hexagon_SI_to_SXTHI_asrh>; // ALU32 / PERM / Sign/zero extend. -def Hexagon_A2_sxth: +def HEXAGON_A2_sxth: si_ALU32_si <"sxth", int_hexagon_A2_sxth>; -def Hexagon_A2_sxtb: +def HEXAGON_A2_sxtb: si_ALU32_si <"sxtb", int_hexagon_A2_sxtb>; -def Hexagon_A2_zxth: +def HEXAGON_A2_zxth: si_ALU32_si <"zxth", int_hexagon_A2_zxth>; -def Hexagon_A2_zxtb: +def HEXAGON_A2_zxtb: si_ALU32_si <"zxtb", int_hexagon_A2_zxtb>; /******************************************************************** @@ -1922,25 +1953,25 @@ def Hexagon_A2_zxtb: *********************************************************************/ // ALU32 / PRED / Compare. -def Hexagon_C2_cmpeq: +def HEXAGON_C2_cmpeq: qi_ALU32_sisi <"cmp.eq", int_hexagon_C2_cmpeq>; -def Hexagon_C2_cmpeqi: +def HEXAGON_C2_cmpeqi: qi_ALU32_sis10 <"cmp.eq", int_hexagon_C2_cmpeqi>; -def Hexagon_C2_cmpgei: +def HEXAGON_C2_cmpgei: qi_ALU32_sis8 <"cmp.ge", int_hexagon_C2_cmpgei>; -def Hexagon_C2_cmpgeui: +def HEXAGON_C2_cmpgeui: qi_ALU32_siu8 <"cmp.geu", int_hexagon_C2_cmpgeui>; -def Hexagon_C2_cmpgt: +def HEXAGON_C2_cmpgt: qi_ALU32_sisi <"cmp.gt", int_hexagon_C2_cmpgt>; -def Hexagon_C2_cmpgti: +def HEXAGON_C2_cmpgti: qi_ALU32_sis10 <"cmp.gt", int_hexagon_C2_cmpgti>; -def Hexagon_C2_cmpgtu: +def HEXAGON_C2_cmpgtu: qi_ALU32_sisi <"cmp.gtu", int_hexagon_C2_cmpgtu>; -def Hexagon_C2_cmpgtui: +def HEXAGON_C2_cmpgtui: qi_ALU32_siu9 <"cmp.gtu", int_hexagon_C2_cmpgtui>; -def Hexagon_C2_cmplt: +def HEXAGON_C2_cmplt: qi_ALU32_sisi <"cmp.lt", int_hexagon_C2_cmplt>; -def Hexagon_C2_cmpltu: +def HEXAGON_C2_cmpltu: qi_ALU32_sisi <"cmp.ltu", int_hexagon_C2_cmpltu>; /******************************************************************** @@ -1949,27 +1980,27 @@ def Hexagon_C2_cmpltu: // ALU32 / VH / Vector add halfwords. 
// Rd32=vadd[u]h(Rs32,Rt32:sat] -def Hexagon_A2_svaddh: +def HEXAGON_A2_svaddh: si_ALU32_sisi <"vaddh", int_hexagon_A2_svaddh>; -def Hexagon_A2_svaddhs: +def HEXAGON_A2_svaddhs: si_ALU32_sisi_sat <"vaddh", int_hexagon_A2_svaddhs>; -def Hexagon_A2_svadduhs: +def HEXAGON_A2_svadduhs: si_ALU32_sisi_sat <"vadduh", int_hexagon_A2_svadduhs>; // ALU32 / VH / Vector average halfwords. -def Hexagon_A2_svavgh: +def HEXAGON_A2_svavgh: si_ALU32_sisi <"vavgh", int_hexagon_A2_svavgh>; -def Hexagon_A2_svavghs: +def HEXAGON_A2_svavghs: si_ALU32_sisi_rnd <"vavgh", int_hexagon_A2_svavghs>; -def Hexagon_A2_svnavgh: +def HEXAGON_A2_svnavgh: si_ALU32_sisi <"vnavgh", int_hexagon_A2_svnavgh>; // ALU32 / VH / Vector subtract halfwords. -def Hexagon_A2_svsubh: +def HEXAGON_A2_svsubh: si_ALU32_sisi <"vsubh", int_hexagon_A2_svsubh>; -def Hexagon_A2_svsubhs: +def HEXAGON_A2_svsubhs: si_ALU32_sisi_sat <"vsubh", int_hexagon_A2_svsubhs>; -def Hexagon_A2_svsubuhs: +def HEXAGON_A2_svsubuhs: si_ALU32_sisi_sat <"vsubuh", int_hexagon_A2_svsubuhs>; /******************************************************************** @@ -1977,109 +2008,109 @@ def Hexagon_A2_svsubuhs: *********************************************************************/ // ALU64 / ALU / Add. -def Hexagon_A2_addp: +def HEXAGON_A2_addp: di_ALU64_didi <"add", int_hexagon_A2_addp>; -def Hexagon_A2_addsat: +def HEXAGON_A2_addsat: si_ALU64_sisi_sat <"add", int_hexagon_A2_addsat>; // ALU64 / ALU / Add halfword. // Even though the definition says hl, it should be lh - //so DON'T change the class " si_ALU64_sisi_l16_lh " it inherits. 
-def Hexagon_A2_addh_l16_hl: +def HEXAGON_A2_addh_l16_hl: si_ALU64_sisi_l16_lh <"add", int_hexagon_A2_addh_l16_hl>; -def Hexagon_A2_addh_l16_ll: +def HEXAGON_A2_addh_l16_ll: si_ALU64_sisi_l16_ll <"add", int_hexagon_A2_addh_l16_ll>; -def Hexagon_A2_addh_l16_sat_hl: +def HEXAGON_A2_addh_l16_sat_hl: si_ALU64_sisi_l16_sat_lh <"add", int_hexagon_A2_addh_l16_sat_hl>; -def Hexagon_A2_addh_l16_sat_ll: +def HEXAGON_A2_addh_l16_sat_ll: si_ALU64_sisi_l16_sat_ll <"add", int_hexagon_A2_addh_l16_sat_ll>; -def Hexagon_A2_addh_h16_hh: +def HEXAGON_A2_addh_h16_hh: si_ALU64_sisi_h16_hh <"add", int_hexagon_A2_addh_h16_hh>; -def Hexagon_A2_addh_h16_hl: +def HEXAGON_A2_addh_h16_hl: si_ALU64_sisi_h16_hl <"add", int_hexagon_A2_addh_h16_hl>; -def Hexagon_A2_addh_h16_lh: +def HEXAGON_A2_addh_h16_lh: si_ALU64_sisi_h16_lh <"add", int_hexagon_A2_addh_h16_lh>; -def Hexagon_A2_addh_h16_ll: +def HEXAGON_A2_addh_h16_ll: si_ALU64_sisi_h16_ll <"add", int_hexagon_A2_addh_h16_ll>; -def Hexagon_A2_addh_h16_sat_hh: +def HEXAGON_A2_addh_h16_sat_hh: si_ALU64_sisi_h16_sat_hh <"add", int_hexagon_A2_addh_h16_sat_hh>; -def Hexagon_A2_addh_h16_sat_hl: +def HEXAGON_A2_addh_h16_sat_hl: si_ALU64_sisi_h16_sat_hl <"add", int_hexagon_A2_addh_h16_sat_hl>; -def Hexagon_A2_addh_h16_sat_lh: +def HEXAGON_A2_addh_h16_sat_lh: si_ALU64_sisi_h16_sat_lh <"add", int_hexagon_A2_addh_h16_sat_lh>; -def Hexagon_A2_addh_h16_sat_ll: +def HEXAGON_A2_addh_h16_sat_ll: si_ALU64_sisi_h16_sat_ll <"add", int_hexagon_A2_addh_h16_sat_ll>; // ALU64 / ALU / Compare. -def Hexagon_C2_cmpeqp: +def HEXAGON_C2_cmpeqp: qi_ALU64_didi <"cmp.eq", int_hexagon_C2_cmpeqp>; -def Hexagon_C2_cmpgtp: +def HEXAGON_C2_cmpgtp: qi_ALU64_didi <"cmp.gt", int_hexagon_C2_cmpgtp>; -def Hexagon_C2_cmpgtup: +def HEXAGON_C2_cmpgtup: qi_ALU64_didi <"cmp.gtu", int_hexagon_C2_cmpgtup>; // ALU64 / ALU / Logical operations. 
-def Hexagon_A2_andp: +def HEXAGON_A2_andp: di_ALU64_didi <"and", int_hexagon_A2_andp>; -def Hexagon_A2_orp: +def HEXAGON_A2_orp: di_ALU64_didi <"or", int_hexagon_A2_orp>; -def Hexagon_A2_xorp: +def HEXAGON_A2_xorp: di_ALU64_didi <"xor", int_hexagon_A2_xorp>; // ALU64 / ALU / Maximum. -def Hexagon_A2_max: +def HEXAGON_A2_max: si_ALU64_sisi <"max", int_hexagon_A2_max>; -def Hexagon_A2_maxu: +def HEXAGON_A2_maxu: si_ALU64_sisi <"maxu", int_hexagon_A2_maxu>; // ALU64 / ALU / Minimum. -def Hexagon_A2_min: +def HEXAGON_A2_min: si_ALU64_sisi <"min", int_hexagon_A2_min>; -def Hexagon_A2_minu: +def HEXAGON_A2_minu: si_ALU64_sisi <"minu", int_hexagon_A2_minu>; // ALU64 / ALU / Subtract. -def Hexagon_A2_subp: +def HEXAGON_A2_subp: di_ALU64_didi <"sub", int_hexagon_A2_subp>; -def Hexagon_A2_subsat: +def HEXAGON_A2_subsat: si_ALU64_sisi_sat <"sub", int_hexagon_A2_subsat>; // ALU64 / ALU / Subtract halfword. // Even though the definition says hl, it should be lh - //so DON'T change the class " si_ALU64_sisi_l16_lh " it inherits. 
-def Hexagon_A2_subh_l16_hl: +def HEXAGON_A2_subh_l16_hl: si_ALU64_sisi_l16_lh <"sub", int_hexagon_A2_subh_l16_hl>; -def Hexagon_A2_subh_l16_ll: +def HEXAGON_A2_subh_l16_ll: si_ALU64_sisi_l16_ll <"sub", int_hexagon_A2_subh_l16_ll>; -def Hexagon_A2_subh_l16_sat_hl: +def HEXAGON_A2_subh_l16_sat_hl: si_ALU64_sisi_l16_sat_lh <"sub", int_hexagon_A2_subh_l16_sat_hl>; -def Hexagon_A2_subh_l16_sat_ll: +def HEXAGON_A2_subh_l16_sat_ll: si_ALU64_sisi_l16_sat_ll <"sub", int_hexagon_A2_subh_l16_sat_ll>; -def Hexagon_A2_subh_h16_hh: +def HEXAGON_A2_subh_h16_hh: si_ALU64_sisi_h16_hh <"sub", int_hexagon_A2_subh_h16_hh>; -def Hexagon_A2_subh_h16_hl: +def HEXAGON_A2_subh_h16_hl: si_ALU64_sisi_h16_hl <"sub", int_hexagon_A2_subh_h16_hl>; -def Hexagon_A2_subh_h16_lh: +def HEXAGON_A2_subh_h16_lh: si_ALU64_sisi_h16_lh <"sub", int_hexagon_A2_subh_h16_lh>; -def Hexagon_A2_subh_h16_ll: +def HEXAGON_A2_subh_h16_ll: si_ALU64_sisi_h16_ll <"sub", int_hexagon_A2_subh_h16_ll>; -def Hexagon_A2_subh_h16_sat_hh: +def HEXAGON_A2_subh_h16_sat_hh: si_ALU64_sisi_h16_sat_hh <"sub", int_hexagon_A2_subh_h16_sat_hh>; -def Hexagon_A2_subh_h16_sat_hl: +def HEXAGON_A2_subh_h16_sat_hl: si_ALU64_sisi_h16_sat_hl <"sub", int_hexagon_A2_subh_h16_sat_hl>; -def Hexagon_A2_subh_h16_sat_lh: +def HEXAGON_A2_subh_h16_sat_lh: si_ALU64_sisi_h16_sat_lh <"sub", int_hexagon_A2_subh_h16_sat_lh>; -def Hexagon_A2_subh_h16_sat_ll: +def HEXAGON_A2_subh_h16_sat_ll: si_ALU64_sisi_h16_sat_ll <"sub", int_hexagon_A2_subh_h16_sat_ll>; // ALU64 / ALU / Transfer register. -def Hexagon_A2_tfrp: +def HEXAGON_A2_tfrp: di_ALU64_di <"", int_hexagon_A2_tfrp>; /******************************************************************** @@ -2087,7 +2118,7 @@ def Hexagon_A2_tfrp: *********************************************************************/ // ALU64 / BIT / Masked parity. 
-def Hexagon_S2_parityp: +def HEXAGON_S2_parityp: si_ALU64_didi <"parity", int_hexagon_S2_parityp>; /******************************************************************** @@ -2095,7 +2126,7 @@ def Hexagon_S2_parityp: *********************************************************************/ // ALU64 / PERM / Vector pack high and low halfwords. -def Hexagon_S2_packhl: +def HEXAGON_S2_packhl: di_ALU64_sisi <"packhl", int_hexagon_S2_packhl>; /******************************************************************** @@ -2103,37 +2134,37 @@ def Hexagon_S2_packhl: *********************************************************************/ // ALU64 / VB / Vector add unsigned bytes. -def Hexagon_A2_vaddub: +def HEXAGON_A2_vaddub: di_ALU64_didi <"vaddub", int_hexagon_A2_vaddub>; -def Hexagon_A2_vaddubs: +def HEXAGON_A2_vaddubs: di_ALU64_didi_sat <"vaddub", int_hexagon_A2_vaddubs>; // ALU64 / VB / Vector average unsigned bytes. -def Hexagon_A2_vavgub: +def HEXAGON_A2_vavgub: di_ALU64_didi <"vavgub", int_hexagon_A2_vavgub>; -def Hexagon_A2_vavgubr: +def HEXAGON_A2_vavgubr: di_ALU64_didi_rnd <"vavgub", int_hexagon_A2_vavgubr>; // ALU64 / VB / Vector compare unsigned bytes. -def Hexagon_A2_vcmpbeq: +def HEXAGON_A2_vcmpbeq: qi_ALU64_didi <"vcmpb.eq", int_hexagon_A2_vcmpbeq>; -def Hexagon_A2_vcmpbgtu: +def HEXAGON_A2_vcmpbgtu: qi_ALU64_didi <"vcmpb.gtu",int_hexagon_A2_vcmpbgtu>; // ALU64 / VB / Vector maximum/minimum unsigned bytes. -def Hexagon_A2_vmaxub: +def HEXAGON_A2_vmaxub: di_ALU64_didi <"vmaxub", int_hexagon_A2_vmaxub>; -def Hexagon_A2_vminub: +def HEXAGON_A2_vminub: di_ALU64_didi <"vminub", int_hexagon_A2_vminub>; // ALU64 / VB / Vector subtract unsigned bytes. -def Hexagon_A2_vsubub: +def HEXAGON_A2_vsubub: di_ALU64_didi <"vsubub", int_hexagon_A2_vsubub>; -def Hexagon_A2_vsububs: +def HEXAGON_A2_vsububs: di_ALU64_didi_sat <"vsubub", int_hexagon_A2_vsububs>; // ALU64 / VB / Vector mux. 
-def Hexagon_C2_vmux: +def HEXAGON_C2_vmux: di_ALU64_qididi <"vmux", int_hexagon_C2_vmux>; @@ -2143,58 +2174,58 @@ def Hexagon_C2_vmux: // ALU64 / VH / Vector add halfwords. // Rdd64=vadd[u]h(Rss64,Rtt64:sat] -def Hexagon_A2_vaddh: +def HEXAGON_A2_vaddh: di_ALU64_didi <"vaddh", int_hexagon_A2_vaddh>; -def Hexagon_A2_vaddhs: +def HEXAGON_A2_vaddhs: di_ALU64_didi_sat <"vaddh", int_hexagon_A2_vaddhs>; -def Hexagon_A2_vadduhs: +def HEXAGON_A2_vadduhs: di_ALU64_didi_sat <"vadduh", int_hexagon_A2_vadduhs>; // ALU64 / VH / Vector average halfwords. // Rdd64=v[n]avg[u]h(Rss64,Rtt64:rnd/:crnd][:sat] -def Hexagon_A2_vavgh: +def HEXAGON_A2_vavgh: di_ALU64_didi <"vavgh", int_hexagon_A2_vavgh>; -def Hexagon_A2_vavghcr: +def HEXAGON_A2_vavghcr: di_ALU64_didi_crnd <"vavgh", int_hexagon_A2_vavghcr>; -def Hexagon_A2_vavghr: +def HEXAGON_A2_vavghr: di_ALU64_didi_rnd <"vavgh", int_hexagon_A2_vavghr>; -def Hexagon_A2_vavguh: +def HEXAGON_A2_vavguh: di_ALU64_didi <"vavguh", int_hexagon_A2_vavguh>; -def Hexagon_A2_vavguhr: +def HEXAGON_A2_vavguhr: di_ALU64_didi_rnd <"vavguh", int_hexagon_A2_vavguhr>; -def Hexagon_A2_vnavgh: +def HEXAGON_A2_vnavgh: di_ALU64_didi <"vnavgh", int_hexagon_A2_vnavgh>; -def Hexagon_A2_vnavghcr: +def HEXAGON_A2_vnavghcr: di_ALU64_didi_crnd_sat <"vnavgh", int_hexagon_A2_vnavghcr>; -def Hexagon_A2_vnavghr: +def HEXAGON_A2_vnavghr: di_ALU64_didi_rnd_sat <"vnavgh", int_hexagon_A2_vnavghr>; // ALU64 / VH / Vector compare halfwords. -def Hexagon_A2_vcmpheq: +def HEXAGON_A2_vcmpheq: qi_ALU64_didi <"vcmph.eq", int_hexagon_A2_vcmpheq>; -def Hexagon_A2_vcmphgt: +def HEXAGON_A2_vcmphgt: qi_ALU64_didi <"vcmph.gt", int_hexagon_A2_vcmphgt>; -def Hexagon_A2_vcmphgtu: +def HEXAGON_A2_vcmphgtu: qi_ALU64_didi <"vcmph.gtu",int_hexagon_A2_vcmphgtu>; // ALU64 / VH / Vector maximum halfwords. 
-def Hexagon_A2_vmaxh: +def HEXAGON_A2_vmaxh: di_ALU64_didi <"vmaxh", int_hexagon_A2_vmaxh>; -def Hexagon_A2_vmaxuh: +def HEXAGON_A2_vmaxuh: di_ALU64_didi <"vmaxuh", int_hexagon_A2_vmaxuh>; // ALU64 / VH / Vector minimum halfwords. -def Hexagon_A2_vminh: +def HEXAGON_A2_vminh: di_ALU64_didi <"vminh", int_hexagon_A2_vminh>; -def Hexagon_A2_vminuh: +def HEXAGON_A2_vminuh: di_ALU64_didi <"vminuh", int_hexagon_A2_vminuh>; // ALU64 / VH / Vector subtract halfwords. -def Hexagon_A2_vsubh: +def HEXAGON_A2_vsubh: di_ALU64_didi <"vsubh", int_hexagon_A2_vsubh>; -def Hexagon_A2_vsubhs: +def HEXAGON_A2_vsubhs: di_ALU64_didi_sat <"vsubh", int_hexagon_A2_vsubhs>; -def Hexagon_A2_vsubuhs: +def HEXAGON_A2_vsubuhs: di_ALU64_didi_sat <"vsubuh", int_hexagon_A2_vsubuhs>; @@ -2204,53 +2235,53 @@ def Hexagon_A2_vsubuhs: // ALU64 / VW / Vector add words. // Rdd32=vaddw(Rss32,Rtt32)[:sat] -def Hexagon_A2_vaddw: +def HEXAGON_A2_vaddw: di_ALU64_didi <"vaddw", int_hexagon_A2_vaddw>; -def Hexagon_A2_vaddws: +def HEXAGON_A2_vaddws: di_ALU64_didi_sat <"vaddw", int_hexagon_A2_vaddws>; // ALU64 / VW / Vector average words. -def Hexagon_A2_vavguw: +def HEXAGON_A2_vavguw: di_ALU64_didi <"vavguw", int_hexagon_A2_vavguw>; -def Hexagon_A2_vavguwr: +def HEXAGON_A2_vavguwr: di_ALU64_didi_rnd <"vavguw", int_hexagon_A2_vavguwr>; -def Hexagon_A2_vavgw: +def HEXAGON_A2_vavgw: di_ALU64_didi <"vavgw", int_hexagon_A2_vavgw>; -def Hexagon_A2_vavgwcr: +def HEXAGON_A2_vavgwcr: di_ALU64_didi_crnd <"vavgw", int_hexagon_A2_vavgwcr>; -def Hexagon_A2_vavgwr: +def HEXAGON_A2_vavgwr: di_ALU64_didi_rnd <"vavgw", int_hexagon_A2_vavgwr>; -def Hexagon_A2_vnavgw: +def HEXAGON_A2_vnavgw: di_ALU64_didi <"vnavgw", int_hexagon_A2_vnavgw>; -def Hexagon_A2_vnavgwcr: +def HEXAGON_A2_vnavgwcr: di_ALU64_didi_crnd_sat <"vnavgw", int_hexagon_A2_vnavgwcr>; -def Hexagon_A2_vnavgwr: +def HEXAGON_A2_vnavgwr: di_ALU64_didi_rnd_sat <"vnavgw", int_hexagon_A2_vnavgwr>; // ALU64 / VW / Vector compare words. 
-def Hexagon_A2_vcmpweq: +def HEXAGON_A2_vcmpweq: qi_ALU64_didi <"vcmpw.eq", int_hexagon_A2_vcmpweq>; -def Hexagon_A2_vcmpwgt: +def HEXAGON_A2_vcmpwgt: qi_ALU64_didi <"vcmpw.gt", int_hexagon_A2_vcmpwgt>; -def Hexagon_A2_vcmpwgtu: +def HEXAGON_A2_vcmpwgtu: qi_ALU64_didi <"vcmpw.gtu",int_hexagon_A2_vcmpwgtu>; // ALU64 / VW / Vector maximum words. -def Hexagon_A2_vmaxw: +def HEXAGON_A2_vmaxw: di_ALU64_didi <"vmaxw", int_hexagon_A2_vmaxw>; -def Hexagon_A2_vmaxuw: +def HEXAGON_A2_vmaxuw: di_ALU64_didi <"vmaxuw", int_hexagon_A2_vmaxuw>; // ALU64 / VW / Vector minimum words. -def Hexagon_A2_vminw: +def HEXAGON_A2_vminw: di_ALU64_didi <"vminw", int_hexagon_A2_vminw>; -def Hexagon_A2_vminuw: +def HEXAGON_A2_vminuw: di_ALU64_didi <"vminuw", int_hexagon_A2_vminuw>; // ALU64 / VW / Vector subtract words. -def Hexagon_A2_vsubw: +def HEXAGON_A2_vsubw: di_ALU64_didi <"vsubw", int_hexagon_A2_vsubw>; -def Hexagon_A2_vsubws: +def HEXAGON_A2_vsubws: di_ALU64_didi_sat <"vsubw", int_hexagon_A2_vsubws>; @@ -2259,25 +2290,25 @@ def Hexagon_A2_vsubws: *********************************************************************/ // CR / Logical reductions on predicates. -def Hexagon_C2_all8: +def HEXAGON_C2_all8: qi_SInst_qi <"all8", int_hexagon_C2_all8>; -def Hexagon_C2_any8: +def HEXAGON_C2_any8: qi_SInst_qi <"any8", int_hexagon_C2_any8>; // CR / Logical operations on predicates. 
-def Hexagon_C2_pxfer_map: +def HEXAGON_C2_pxfer_map: qi_SInst_qi_pxfer <"", int_hexagon_C2_pxfer_map>; -def Hexagon_C2_and: +def HEXAGON_C2_and: qi_SInst_qiqi <"and", int_hexagon_C2_and>; -def Hexagon_C2_andn: +def HEXAGON_C2_andn: qi_SInst_qiqi_neg <"and", int_hexagon_C2_andn>; -def Hexagon_C2_not: +def HEXAGON_C2_not: qi_SInst_qi <"not", int_hexagon_C2_not>; -def Hexagon_C2_or: +def HEXAGON_C2_or: qi_SInst_qiqi <"or", int_hexagon_C2_or>; -def Hexagon_C2_orn: +def HEXAGON_C2_orn: qi_SInst_qiqi_neg <"or", int_hexagon_C2_orn>; -def Hexagon_C2_xor: +def HEXAGON_C2_xor: qi_SInst_qiqi <"xor", int_hexagon_C2_xor>; @@ -2286,27 +2317,27 @@ def Hexagon_C2_xor: *********************************************************************/ // MTYPE / ALU / Add and accumulate. -def Hexagon_M2_acci: +def HEXAGON_M2_acci: si_MInst_sisisi_acc <"add", int_hexagon_M2_acci>; -def Hexagon_M2_accii: +def HEXAGON_M2_accii: si_MInst_sisis8_acc <"add", int_hexagon_M2_accii>; -def Hexagon_M2_nacci: +def HEXAGON_M2_nacci: si_MInst_sisisi_nac <"add", int_hexagon_M2_nacci>; -def Hexagon_M2_naccii: +def HEXAGON_M2_naccii: si_MInst_sisis8_nac <"add", int_hexagon_M2_naccii>; // MTYPE / ALU / Subtract and accumulate. -def Hexagon_M2_subacc: +def HEXAGON_M2_subacc: si_MInst_sisisi_acc <"sub", int_hexagon_M2_subacc>; // MTYPE / ALU / Vector absolute difference. -def Hexagon_M2_vabsdiffh: +def HEXAGON_M2_vabsdiffh: di_MInst_didi <"vabsdiffh",int_hexagon_M2_vabsdiffh>; -def Hexagon_M2_vabsdiffw: +def HEXAGON_M2_vabsdiffw: di_MInst_didi <"vabsdiffw",int_hexagon_M2_vabsdiffw>; // MTYPE / ALU / XOR and xor with destination. -def Hexagon_M2_xor_xacc: +def HEXAGON_M2_xor_xacc: si_MInst_sisisi_xacc <"xor", int_hexagon_M2_xor_xacc>; @@ -2316,91 +2347,91 @@ def Hexagon_M2_xor_xacc: // MTYPE / COMPLEX / Complex multiply. 
// Rdd[-+]=cmpy(Rs, Rt:<<1]:sat -def Hexagon_M2_cmpys_s1: +def HEXAGON_M2_cmpys_s1: di_MInst_sisi_s1_sat <"cmpy", int_hexagon_M2_cmpys_s1>; -def Hexagon_M2_cmpys_s0: +def HEXAGON_M2_cmpys_s0: di_MInst_sisi_sat <"cmpy", int_hexagon_M2_cmpys_s0>; -def Hexagon_M2_cmpysc_s1: +def HEXAGON_M2_cmpysc_s1: di_MInst_sisi_s1_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s1>; -def Hexagon_M2_cmpysc_s0: +def HEXAGON_M2_cmpysc_s0: di_MInst_sisi_sat_conj <"cmpy", int_hexagon_M2_cmpysc_s0>; -def Hexagon_M2_cmacs_s1: +def HEXAGON_M2_cmacs_s1: di_MInst_disisi_acc_s1_sat <"cmpy", int_hexagon_M2_cmacs_s1>; -def Hexagon_M2_cmacs_s0: +def HEXAGON_M2_cmacs_s0: di_MInst_disisi_acc_sat <"cmpy", int_hexagon_M2_cmacs_s0>; -def Hexagon_M2_cmacsc_s1: +def HEXAGON_M2_cmacsc_s1: di_MInst_disisi_acc_s1_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s1>; -def Hexagon_M2_cmacsc_s0: +def HEXAGON_M2_cmacsc_s0: di_MInst_disisi_acc_sat_conj <"cmpy", int_hexagon_M2_cmacsc_s0>; -def Hexagon_M2_cnacs_s1: +def HEXAGON_M2_cnacs_s1: di_MInst_disisi_nac_s1_sat <"cmpy", int_hexagon_M2_cnacs_s1>; -def Hexagon_M2_cnacs_s0: +def HEXAGON_M2_cnacs_s0: di_MInst_disisi_nac_sat <"cmpy", int_hexagon_M2_cnacs_s0>; -def Hexagon_M2_cnacsc_s1: +def HEXAGON_M2_cnacsc_s1: di_MInst_disisi_nac_s1_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s1>; -def Hexagon_M2_cnacsc_s0: +def HEXAGON_M2_cnacsc_s0: di_MInst_disisi_nac_sat_conj <"cmpy", int_hexagon_M2_cnacsc_s0>; // MTYPE / COMPLEX / Complex multiply real or imaginary. -def Hexagon_M2_cmpyr_s0: +def HEXAGON_M2_cmpyr_s0: di_MInst_sisi <"cmpyr", int_hexagon_M2_cmpyr_s0>; -def Hexagon_M2_cmacr_s0: +def HEXAGON_M2_cmacr_s0: di_MInst_disisi_acc <"cmpyr", int_hexagon_M2_cmacr_s0>; -def Hexagon_M2_cmpyi_s0: +def HEXAGON_M2_cmpyi_s0: di_MInst_sisi <"cmpyi", int_hexagon_M2_cmpyi_s0>; -def Hexagon_M2_cmaci_s0: +def HEXAGON_M2_cmaci_s0: di_MInst_disisi_acc <"cmpyi", int_hexagon_M2_cmaci_s0>; // MTYPE / COMPLEX / Complex multiply with round and pack. 
// Rxx32+=cmpy(Rs32,[*]Rt32:<<1]:rnd:sat -def Hexagon_M2_cmpyrs_s0: +def HEXAGON_M2_cmpyrs_s0: si_MInst_sisi_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s0>; -def Hexagon_M2_cmpyrs_s1: +def HEXAGON_M2_cmpyrs_s1: si_MInst_sisi_s1_rnd_sat <"cmpy", int_hexagon_M2_cmpyrs_s1>; -def Hexagon_M2_cmpyrsc_s0: +def HEXAGON_M2_cmpyrsc_s0: si_MInst_sisi_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s0>; -def Hexagon_M2_cmpyrsc_s1: +def HEXAGON_M2_cmpyrsc_s1: si_MInst_sisi_s1_rnd_sat_conj <"cmpy", int_hexagon_M2_cmpyrsc_s1>; //MTYPE / COMPLEX / Vector complex multiply real or imaginary. -def Hexagon_M2_vcmpy_s0_sat_i: +def HEXAGON_M2_vcmpy_s0_sat_i: di_MInst_didi_sat <"vcmpyi", int_hexagon_M2_vcmpy_s0_sat_i>; -def Hexagon_M2_vcmpy_s1_sat_i: +def HEXAGON_M2_vcmpy_s1_sat_i: di_MInst_didi_s1_sat <"vcmpyi", int_hexagon_M2_vcmpy_s1_sat_i>; -def Hexagon_M2_vcmpy_s0_sat_r: +def HEXAGON_M2_vcmpy_s0_sat_r: di_MInst_didi_sat <"vcmpyr", int_hexagon_M2_vcmpy_s0_sat_r>; -def Hexagon_M2_vcmpy_s1_sat_r: +def HEXAGON_M2_vcmpy_s1_sat_r: di_MInst_didi_s1_sat <"vcmpyr", int_hexagon_M2_vcmpy_s1_sat_r>; -def Hexagon_M2_vcmac_s0_sat_i: +def HEXAGON_M2_vcmac_s0_sat_i: di_MInst_dididi_acc_sat <"vcmpyi", int_hexagon_M2_vcmac_s0_sat_i>; -def Hexagon_M2_vcmac_s0_sat_r: +def HEXAGON_M2_vcmac_s0_sat_r: di_MInst_dididi_acc_sat <"vcmpyr", int_hexagon_M2_vcmac_s0_sat_r>; //MTYPE / COMPLEX / Vector reduce complex multiply real or imaginary. 
-def Hexagon_M2_vrcmpyi_s0: +def HEXAGON_M2_vrcmpyi_s0: di_MInst_didi <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0>; -def Hexagon_M2_vrcmpyr_s0: +def HEXAGON_M2_vrcmpyr_s0: di_MInst_didi <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0>; -def Hexagon_M2_vrcmpyi_s0c: +def HEXAGON_M2_vrcmpyi_s0c: di_MInst_didi_conj <"vrcmpyi", int_hexagon_M2_vrcmpyi_s0c>; -def Hexagon_M2_vrcmpyr_s0c: +def HEXAGON_M2_vrcmpyr_s0c: di_MInst_didi_conj <"vrcmpyr", int_hexagon_M2_vrcmpyr_s0c>; -def Hexagon_M2_vrcmaci_s0: +def HEXAGON_M2_vrcmaci_s0: di_MInst_dididi_acc <"vrcmpyi", int_hexagon_M2_vrcmaci_s0>; -def Hexagon_M2_vrcmacr_s0: +def HEXAGON_M2_vrcmacr_s0: di_MInst_dididi_acc <"vrcmpyr", int_hexagon_M2_vrcmacr_s0>; -def Hexagon_M2_vrcmaci_s0c: +def HEXAGON_M2_vrcmaci_s0c: di_MInst_dididi_acc_conj <"vrcmpyi", int_hexagon_M2_vrcmaci_s0c>; -def Hexagon_M2_vrcmacr_s0c: +def HEXAGON_M2_vrcmacr_s0c: di_MInst_dididi_acc_conj <"vrcmpyr", int_hexagon_M2_vrcmacr_s0c>; @@ -2409,115 +2440,120 @@ def Hexagon_M2_vrcmacr_s0c: *********************************************************************/ // MTYPE / MPYH / Multiply and use lower result. -//def Hexagon_M2_mpysmi: +//def HEXAGON_M2_mpysmi: +//FIXME: Hexagon_M2_mpysmi should really by of the type si_MInst_sim9, +// not si_MInst_sis9 - but for now, we will use s9. +// def Hexagon_M2_mpysmi: // si_MInst_sim9 <"mpyi", int_hexagon_M2_mpysmi>; -def Hexagon_M2_mpyi: +def Hexagon_M2_mpysmi: + si_MInst_sis9 <"mpyi", int_hexagon_M2_mpysmi>; +def HEXAGON_M2_mpyi: si_MInst_sisi <"mpyi", int_hexagon_M2_mpyi>; -def Hexagon_M2_mpyui: +def HEXAGON_M2_mpyui: si_MInst_sisi <"mpyui", int_hexagon_M2_mpyui>; -def Hexagon_M2_macsip: +def HEXAGON_M2_macsip: si_MInst_sisiu8_acc <"mpyi", int_hexagon_M2_macsip>; -def Hexagon_M2_maci: +def HEXAGON_M2_maci: si_MInst_sisisi_acc <"mpyi", int_hexagon_M2_maci>; -def Hexagon_M2_macsin: +def HEXAGON_M2_macsin: si_MInst_sisiu8_nac <"mpyi", int_hexagon_M2_macsin>; // MTYPE / MPYH / Multiply word by half (32x16). 
//Rdd[+]=vmpywoh(Rss,Rtt)[:<<1][:rnd][:sat] //Rdd[+]=vmpyweh(Rss,Rtt)[:<<1][:rnd][:sat] -def Hexagon_M2_mmpyl_rs1: +def HEXAGON_M2_mmpyl_rs1: di_MInst_didi_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs1>; -def Hexagon_M2_mmpyl_s1: +def HEXAGON_M2_mmpyl_s1: di_MInst_didi_s1_sat <"vmpyweh", int_hexagon_M2_mmpyl_s1>; -def Hexagon_M2_mmpyl_rs0: +def HEXAGON_M2_mmpyl_rs0: di_MInst_didi_rnd_sat <"vmpyweh", int_hexagon_M2_mmpyl_rs0>; -def Hexagon_M2_mmpyl_s0: +def HEXAGON_M2_mmpyl_s0: di_MInst_didi_sat <"vmpyweh", int_hexagon_M2_mmpyl_s0>; -def Hexagon_M2_mmpyh_rs1: +def HEXAGON_M2_mmpyh_rs1: di_MInst_didi_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs1>; -def Hexagon_M2_mmpyh_s1: +def HEXAGON_M2_mmpyh_s1: di_MInst_didi_s1_sat <"vmpywoh", int_hexagon_M2_mmpyh_s1>; -def Hexagon_M2_mmpyh_rs0: +def HEXAGON_M2_mmpyh_rs0: di_MInst_didi_rnd_sat <"vmpywoh", int_hexagon_M2_mmpyh_rs0>; -def Hexagon_M2_mmpyh_s0: +def HEXAGON_M2_mmpyh_s0: di_MInst_didi_sat <"vmpywoh", int_hexagon_M2_mmpyh_s0>; -def Hexagon_M2_mmacls_rs1: +def HEXAGON_M2_mmacls_rs1: di_MInst_dididi_acc_s1_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs1>; -def Hexagon_M2_mmacls_s1: +def HEXAGON_M2_mmacls_s1: di_MInst_dididi_acc_s1_sat <"vmpyweh", int_hexagon_M2_mmacls_s1>; -def Hexagon_M2_mmacls_rs0: +def HEXAGON_M2_mmacls_rs0: di_MInst_dididi_acc_rnd_sat <"vmpyweh", int_hexagon_M2_mmacls_rs0>; -def Hexagon_M2_mmacls_s0: +def HEXAGON_M2_mmacls_s0: di_MInst_dididi_acc_sat <"vmpyweh", int_hexagon_M2_mmacls_s0>; -def Hexagon_M2_mmachs_rs1: +def HEXAGON_M2_mmachs_rs1: di_MInst_dididi_acc_s1_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs1>; -def Hexagon_M2_mmachs_s1: +def HEXAGON_M2_mmachs_s1: di_MInst_dididi_acc_s1_sat <"vmpywoh", int_hexagon_M2_mmachs_s1>; -def Hexagon_M2_mmachs_rs0: +def HEXAGON_M2_mmachs_rs0: di_MInst_dididi_acc_rnd_sat <"vmpywoh", int_hexagon_M2_mmachs_rs0>; -def Hexagon_M2_mmachs_s0: +def HEXAGON_M2_mmachs_s0: di_MInst_dididi_acc_sat <"vmpywoh", int_hexagon_M2_mmachs_s0>; // MTYPE / MPYH / Multiply word 
by unsigned half (32x16). //Rdd[+]=vmpywouh(Rss,Rtt)[:<<1][:rnd][:sat] //Rdd[+]=vmpyweuh(Rss,Rtt)[:<<1][:rnd][:sat] -def Hexagon_M2_mmpyul_rs1: +def HEXAGON_M2_mmpyul_rs1: di_MInst_didi_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs1>; -def Hexagon_M2_mmpyul_s1: +def HEXAGON_M2_mmpyul_s1: di_MInst_didi_s1_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s1>; -def Hexagon_M2_mmpyul_rs0: +def HEXAGON_M2_mmpyul_rs0: di_MInst_didi_rnd_sat <"vmpyweuh", int_hexagon_M2_mmpyul_rs0>; -def Hexagon_M2_mmpyul_s0: +def HEXAGON_M2_mmpyul_s0: di_MInst_didi_sat <"vmpyweuh", int_hexagon_M2_mmpyul_s0>; -def Hexagon_M2_mmpyuh_rs1: +def HEXAGON_M2_mmpyuh_rs1: di_MInst_didi_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs1>; -def Hexagon_M2_mmpyuh_s1: +def HEXAGON_M2_mmpyuh_s1: di_MInst_didi_s1_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s1>; -def Hexagon_M2_mmpyuh_rs0: +def HEXAGON_M2_mmpyuh_rs0: di_MInst_didi_rnd_sat <"vmpywouh", int_hexagon_M2_mmpyuh_rs0>; -def Hexagon_M2_mmpyuh_s0: +def HEXAGON_M2_mmpyuh_s0: di_MInst_didi_sat <"vmpywouh", int_hexagon_M2_mmpyuh_s0>; -def Hexagon_M2_mmaculs_rs1: +def HEXAGON_M2_mmaculs_rs1: di_MInst_dididi_acc_s1_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs1>; -def Hexagon_M2_mmaculs_s1: +def HEXAGON_M2_mmaculs_s1: di_MInst_dididi_acc_s1_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s1>; -def Hexagon_M2_mmaculs_rs0: +def HEXAGON_M2_mmaculs_rs0: di_MInst_dididi_acc_rnd_sat <"vmpyweuh", int_hexagon_M2_mmaculs_rs0>; -def Hexagon_M2_mmaculs_s0: +def HEXAGON_M2_mmaculs_s0: di_MInst_dididi_acc_sat <"vmpyweuh", int_hexagon_M2_mmaculs_s0>; -def Hexagon_M2_mmacuhs_rs1: +def HEXAGON_M2_mmacuhs_rs1: di_MInst_dididi_acc_s1_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs1>; -def Hexagon_M2_mmacuhs_s1: +def HEXAGON_M2_mmacuhs_s1: di_MInst_dididi_acc_s1_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s1>; -def Hexagon_M2_mmacuhs_rs0: +def HEXAGON_M2_mmacuhs_rs0: di_MInst_dididi_acc_rnd_sat <"vmpywouh", int_hexagon_M2_mmacuhs_rs0>; -def Hexagon_M2_mmacuhs_s0: +def HEXAGON_M2_mmacuhs_s0: 
di_MInst_dididi_acc_sat <"vmpywouh", int_hexagon_M2_mmacuhs_s0>; // MTYPE / MPYH / Multiply and use upper result. -def Hexagon_M2_hmmpyh_rs1: +def HEXAGON_M2_hmmpyh_rs1: si_MInst_sisi_h_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyh_rs1>; -def Hexagon_M2_hmmpyl_rs1: +def HEXAGON_M2_hmmpyl_rs1: si_MInst_sisi_l_s1_rnd_sat <"mpy", int_hexagon_M2_hmmpyl_rs1>; -def Hexagon_M2_mpy_up: +def HEXAGON_M2_mpy_up: si_MInst_sisi <"mpy", int_hexagon_M2_mpy_up>; -def Hexagon_M2_dpmpyss_rnd_s0: +def HEXAGON_M2_dpmpyss_rnd_s0: si_MInst_sisi_rnd <"mpy", int_hexagon_M2_dpmpyss_rnd_s0>; -def Hexagon_M2_mpyu_up: +def HEXAGON_M2_mpyu_up: si_MInst_sisi <"mpyu", int_hexagon_M2_mpyu_up>; // MTYPE / MPYH / Multiply and use full result. -def Hexagon_M2_dpmpyuu_s0: +def HEXAGON_M2_dpmpyuu_s0: di_MInst_sisi <"mpyu", int_hexagon_M2_dpmpyuu_s0>; -def Hexagon_M2_dpmpyuu_acc_s0: +def HEXAGON_M2_dpmpyuu_acc_s0: di_MInst_disisi_acc <"mpyu", int_hexagon_M2_dpmpyuu_acc_s0>; -def Hexagon_M2_dpmpyuu_nac_s0: +def HEXAGON_M2_dpmpyuu_nac_s0: di_MInst_disisi_nac <"mpyu", int_hexagon_M2_dpmpyuu_nac_s0>; -def Hexagon_M2_dpmpyss_s0: +def HEXAGON_M2_dpmpyss_s0: di_MInst_sisi <"mpy", int_hexagon_M2_dpmpyss_s0>; -def Hexagon_M2_dpmpyss_acc_s0: +def HEXAGON_M2_dpmpyss_acc_s0: di_MInst_disisi_acc <"mpy", int_hexagon_M2_dpmpyss_acc_s0>; -def Hexagon_M2_dpmpyss_nac_s0: +def HEXAGON_M2_dpmpyss_nac_s0: di_MInst_disisi_nac <"mpy", int_hexagon_M2_dpmpyss_nac_s0>; @@ -2528,334 +2564,334 @@ def Hexagon_M2_dpmpyss_nac_s0: // MTYPE / MPYS / Scalar 16x16 multiply signed. 
//Rd=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1]| // [:<<0[:rnd|:sat|:rnd:sat]|:<<1[:rnd|:sat|:rnd:sat]]] -def Hexagon_M2_mpy_hh_s0: +def HEXAGON_M2_mpy_hh_s0: si_MInst_sisi_hh <"mpy", int_hexagon_M2_mpy_hh_s0>; -def Hexagon_M2_mpy_hh_s1: +def HEXAGON_M2_mpy_hh_s1: si_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpy_hh_s1>; -def Hexagon_M2_mpy_rnd_hh_s1: +def HEXAGON_M2_mpy_rnd_hh_s1: si_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_rnd_hh_s1>; -def Hexagon_M2_mpy_sat_rnd_hh_s1: +def HEXAGON_M2_mpy_sat_rnd_hh_s1: si_MInst_sisi_sat_rnd_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s1>; -def Hexagon_M2_mpy_sat_hh_s1: +def HEXAGON_M2_mpy_sat_hh_s1: si_MInst_sisi_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_sat_hh_s1>; -def Hexagon_M2_mpy_rnd_hh_s0: +def HEXAGON_M2_mpy_rnd_hh_s0: si_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpy_rnd_hh_s0>; -def Hexagon_M2_mpy_sat_rnd_hh_s0: +def HEXAGON_M2_mpy_sat_rnd_hh_s0: si_MInst_sisi_sat_rnd_hh <"mpy", int_hexagon_M2_mpy_sat_rnd_hh_s0>; -def Hexagon_M2_mpy_sat_hh_s0: +def HEXAGON_M2_mpy_sat_hh_s0: si_MInst_sisi_sat_hh <"mpy", int_hexagon_M2_mpy_sat_hh_s0>; -def Hexagon_M2_mpy_hl_s0: +def HEXAGON_M2_mpy_hl_s0: si_MInst_sisi_hl <"mpy", int_hexagon_M2_mpy_hl_s0>; -def Hexagon_M2_mpy_hl_s1: +def HEXAGON_M2_mpy_hl_s1: si_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpy_hl_s1>; -def Hexagon_M2_mpy_rnd_hl_s1: +def HEXAGON_M2_mpy_rnd_hl_s1: si_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_rnd_hl_s1>; -def Hexagon_M2_mpy_sat_rnd_hl_s1: +def HEXAGON_M2_mpy_sat_rnd_hl_s1: si_MInst_sisi_sat_rnd_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s1>; -def Hexagon_M2_mpy_sat_hl_s1: +def HEXAGON_M2_mpy_sat_hl_s1: si_MInst_sisi_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_sat_hl_s1>; -def Hexagon_M2_mpy_rnd_hl_s0: +def HEXAGON_M2_mpy_rnd_hl_s0: si_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpy_rnd_hl_s0>; -def Hexagon_M2_mpy_sat_rnd_hl_s0: +def HEXAGON_M2_mpy_sat_rnd_hl_s0: si_MInst_sisi_sat_rnd_hl <"mpy", int_hexagon_M2_mpy_sat_rnd_hl_s0>; -def Hexagon_M2_mpy_sat_hl_s0: +def 
HEXAGON_M2_mpy_sat_hl_s0: si_MInst_sisi_sat_hl <"mpy", int_hexagon_M2_mpy_sat_hl_s0>; -def Hexagon_M2_mpy_lh_s0: +def HEXAGON_M2_mpy_lh_s0: si_MInst_sisi_lh <"mpy", int_hexagon_M2_mpy_lh_s0>; -def Hexagon_M2_mpy_lh_s1: +def HEXAGON_M2_mpy_lh_s1: si_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpy_lh_s1>; -def Hexagon_M2_mpy_rnd_lh_s1: +def HEXAGON_M2_mpy_rnd_lh_s1: si_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_rnd_lh_s1>; -def Hexagon_M2_mpy_sat_rnd_lh_s1: +def HEXAGON_M2_mpy_sat_rnd_lh_s1: si_MInst_sisi_sat_rnd_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s1>; -def Hexagon_M2_mpy_sat_lh_s1: +def HEXAGON_M2_mpy_sat_lh_s1: si_MInst_sisi_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_sat_lh_s1>; -def Hexagon_M2_mpy_rnd_lh_s0: +def HEXAGON_M2_mpy_rnd_lh_s0: si_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpy_rnd_lh_s0>; -def Hexagon_M2_mpy_sat_rnd_lh_s0: +def HEXAGON_M2_mpy_sat_rnd_lh_s0: si_MInst_sisi_sat_rnd_lh <"mpy", int_hexagon_M2_mpy_sat_rnd_lh_s0>; -def Hexagon_M2_mpy_sat_lh_s0: +def HEXAGON_M2_mpy_sat_lh_s0: si_MInst_sisi_sat_lh <"mpy", int_hexagon_M2_mpy_sat_lh_s0>; -def Hexagon_M2_mpy_ll_s0: +def HEXAGON_M2_mpy_ll_s0: si_MInst_sisi_ll <"mpy", int_hexagon_M2_mpy_ll_s0>; -def Hexagon_M2_mpy_ll_s1: +def HEXAGON_M2_mpy_ll_s1: si_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpy_ll_s1>; -def Hexagon_M2_mpy_rnd_ll_s1: +def HEXAGON_M2_mpy_rnd_ll_s1: si_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_rnd_ll_s1>; -def Hexagon_M2_mpy_sat_rnd_ll_s1: +def HEXAGON_M2_mpy_sat_rnd_ll_s1: si_MInst_sisi_sat_rnd_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s1>; -def Hexagon_M2_mpy_sat_ll_s1: +def HEXAGON_M2_mpy_sat_ll_s1: si_MInst_sisi_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_sat_ll_s1>; -def Hexagon_M2_mpy_rnd_ll_s0: +def HEXAGON_M2_mpy_rnd_ll_s0: si_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpy_rnd_ll_s0>; -def Hexagon_M2_mpy_sat_rnd_ll_s0: +def HEXAGON_M2_mpy_sat_rnd_ll_s0: si_MInst_sisi_sat_rnd_ll <"mpy", int_hexagon_M2_mpy_sat_rnd_ll_s0>; -def Hexagon_M2_mpy_sat_ll_s0: +def 
HEXAGON_M2_mpy_sat_ll_s0: si_MInst_sisi_sat_ll <"mpy", int_hexagon_M2_mpy_sat_ll_s0>; //Rdd=mpy(Rs.[H|L],Rt.[H|L])[[:<<0|:<<1]|[:<<0:rnd|:<<1:rnd]] -def Hexagon_M2_mpyd_hh_s0: +def HEXAGON_M2_mpyd_hh_s0: di_MInst_sisi_hh <"mpy", int_hexagon_M2_mpyd_hh_s0>; -def Hexagon_M2_mpyd_hh_s1: +def HEXAGON_M2_mpyd_hh_s1: di_MInst_sisi_hh_s1 <"mpy", int_hexagon_M2_mpyd_hh_s1>; -def Hexagon_M2_mpyd_rnd_hh_s1: +def HEXAGON_M2_mpyd_rnd_hh_s1: di_MInst_sisi_rnd_hh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hh_s1>; -def Hexagon_M2_mpyd_rnd_hh_s0: +def HEXAGON_M2_mpyd_rnd_hh_s0: di_MInst_sisi_rnd_hh <"mpy", int_hexagon_M2_mpyd_rnd_hh_s0>; -def Hexagon_M2_mpyd_hl_s0: +def HEXAGON_M2_mpyd_hl_s0: di_MInst_sisi_hl <"mpy", int_hexagon_M2_mpyd_hl_s0>; -def Hexagon_M2_mpyd_hl_s1: +def HEXAGON_M2_mpyd_hl_s1: di_MInst_sisi_hl_s1 <"mpy", int_hexagon_M2_mpyd_hl_s1>; -def Hexagon_M2_mpyd_rnd_hl_s1: +def HEXAGON_M2_mpyd_rnd_hl_s1: di_MInst_sisi_rnd_hl_s1 <"mpy", int_hexagon_M2_mpyd_rnd_hl_s1>; -def Hexagon_M2_mpyd_rnd_hl_s0: +def HEXAGON_M2_mpyd_rnd_hl_s0: di_MInst_sisi_rnd_hl <"mpy", int_hexagon_M2_mpyd_rnd_hl_s0>; -def Hexagon_M2_mpyd_lh_s0: +def HEXAGON_M2_mpyd_lh_s0: di_MInst_sisi_lh <"mpy", int_hexagon_M2_mpyd_lh_s0>; -def Hexagon_M2_mpyd_lh_s1: +def HEXAGON_M2_mpyd_lh_s1: di_MInst_sisi_lh_s1 <"mpy", int_hexagon_M2_mpyd_lh_s1>; -def Hexagon_M2_mpyd_rnd_lh_s1: +def HEXAGON_M2_mpyd_rnd_lh_s1: di_MInst_sisi_rnd_lh_s1 <"mpy", int_hexagon_M2_mpyd_rnd_lh_s1>; -def Hexagon_M2_mpyd_rnd_lh_s0: +def HEXAGON_M2_mpyd_rnd_lh_s0: di_MInst_sisi_rnd_lh <"mpy", int_hexagon_M2_mpyd_rnd_lh_s0>; -def Hexagon_M2_mpyd_ll_s0: +def HEXAGON_M2_mpyd_ll_s0: di_MInst_sisi_ll <"mpy", int_hexagon_M2_mpyd_ll_s0>; -def Hexagon_M2_mpyd_ll_s1: +def HEXAGON_M2_mpyd_ll_s1: di_MInst_sisi_ll_s1 <"mpy", int_hexagon_M2_mpyd_ll_s1>; -def Hexagon_M2_mpyd_rnd_ll_s1: +def HEXAGON_M2_mpyd_rnd_ll_s1: di_MInst_sisi_rnd_ll_s1 <"mpy", int_hexagon_M2_mpyd_rnd_ll_s1>; -def Hexagon_M2_mpyd_rnd_ll_s0: +def HEXAGON_M2_mpyd_rnd_ll_s0: 
di_MInst_sisi_rnd_ll <"mpy", int_hexagon_M2_mpyd_rnd_ll_s0>; //Rx+=mpy(Rs.[H|L],Rt.[H|L])[[[:<<0|:<<1]|[:<<0:sat|:<<1:sat]] -def Hexagon_M2_mpy_acc_hh_s0: +def HEXAGON_M2_mpy_acc_hh_s0: si_MInst_sisisi_acc_hh <"mpy", int_hexagon_M2_mpy_acc_hh_s0>; -def Hexagon_M2_mpy_acc_hh_s1: +def HEXAGON_M2_mpy_acc_hh_s1: si_MInst_sisisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_hh_s1>; -def Hexagon_M2_mpy_acc_sat_hh_s1: +def HEXAGON_M2_mpy_acc_sat_hh_s1: si_MInst_sisisi_acc_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s1>; -def Hexagon_M2_mpy_acc_sat_hh_s0: +def HEXAGON_M2_mpy_acc_sat_hh_s0: si_MInst_sisisi_acc_sat_hh <"mpy", int_hexagon_M2_mpy_acc_sat_hh_s0>; -def Hexagon_M2_mpy_acc_hl_s0: +def HEXAGON_M2_mpy_acc_hl_s0: si_MInst_sisisi_acc_hl <"mpy", int_hexagon_M2_mpy_acc_hl_s0>; -def Hexagon_M2_mpy_acc_hl_s1: +def HEXAGON_M2_mpy_acc_hl_s1: si_MInst_sisisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_hl_s1>; -def Hexagon_M2_mpy_acc_sat_hl_s1: +def HEXAGON_M2_mpy_acc_sat_hl_s1: si_MInst_sisisi_acc_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s1>; -def Hexagon_M2_mpy_acc_sat_hl_s0: +def HEXAGON_M2_mpy_acc_sat_hl_s0: si_MInst_sisisi_acc_sat_hl <"mpy", int_hexagon_M2_mpy_acc_sat_hl_s0>; -def Hexagon_M2_mpy_acc_lh_s0: +def HEXAGON_M2_mpy_acc_lh_s0: si_MInst_sisisi_acc_lh <"mpy", int_hexagon_M2_mpy_acc_lh_s0>; -def Hexagon_M2_mpy_acc_lh_s1: +def HEXAGON_M2_mpy_acc_lh_s1: si_MInst_sisisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_lh_s1>; -def Hexagon_M2_mpy_acc_sat_lh_s1: +def HEXAGON_M2_mpy_acc_sat_lh_s1: si_MInst_sisisi_acc_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s1>; -def Hexagon_M2_mpy_acc_sat_lh_s0: +def HEXAGON_M2_mpy_acc_sat_lh_s0: si_MInst_sisisi_acc_sat_lh <"mpy", int_hexagon_M2_mpy_acc_sat_lh_s0>; -def Hexagon_M2_mpy_acc_ll_s0: +def HEXAGON_M2_mpy_acc_ll_s0: si_MInst_sisisi_acc_ll <"mpy", int_hexagon_M2_mpy_acc_ll_s0>; -def Hexagon_M2_mpy_acc_ll_s1: +def HEXAGON_M2_mpy_acc_ll_s1: si_MInst_sisisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_ll_s1>; -def 
Hexagon_M2_mpy_acc_sat_ll_s1: +def HEXAGON_M2_mpy_acc_sat_ll_s1: si_MInst_sisisi_acc_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s1>; -def Hexagon_M2_mpy_acc_sat_ll_s0: +def HEXAGON_M2_mpy_acc_sat_ll_s0: si_MInst_sisisi_acc_sat_ll <"mpy", int_hexagon_M2_mpy_acc_sat_ll_s0>; //Rx-=mpy(Rs.[H|L],Rt.[H|L])[[[:<<0|:<<1]|[:<<0:sat|:<<1:sat]] -def Hexagon_M2_mpy_nac_hh_s0: +def HEXAGON_M2_mpy_nac_hh_s0: si_MInst_sisisi_nac_hh <"mpy", int_hexagon_M2_mpy_nac_hh_s0>; -def Hexagon_M2_mpy_nac_hh_s1: +def HEXAGON_M2_mpy_nac_hh_s1: si_MInst_sisisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_hh_s1>; -def Hexagon_M2_mpy_nac_sat_hh_s1: +def HEXAGON_M2_mpy_nac_sat_hh_s1: si_MInst_sisisi_nac_sat_hh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s1>; -def Hexagon_M2_mpy_nac_sat_hh_s0: +def HEXAGON_M2_mpy_nac_sat_hh_s0: si_MInst_sisisi_nac_sat_hh <"mpy", int_hexagon_M2_mpy_nac_sat_hh_s0>; -def Hexagon_M2_mpy_nac_hl_s0: +def HEXAGON_M2_mpy_nac_hl_s0: si_MInst_sisisi_nac_hl <"mpy", int_hexagon_M2_mpy_nac_hl_s0>; -def Hexagon_M2_mpy_nac_hl_s1: +def HEXAGON_M2_mpy_nac_hl_s1: si_MInst_sisisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_hl_s1>; -def Hexagon_M2_mpy_nac_sat_hl_s1: +def HEXAGON_M2_mpy_nac_sat_hl_s1: si_MInst_sisisi_nac_sat_hl_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s1>; -def Hexagon_M2_mpy_nac_sat_hl_s0: +def HEXAGON_M2_mpy_nac_sat_hl_s0: si_MInst_sisisi_nac_sat_hl <"mpy", int_hexagon_M2_mpy_nac_sat_hl_s0>; -def Hexagon_M2_mpy_nac_lh_s0: +def HEXAGON_M2_mpy_nac_lh_s0: si_MInst_sisisi_nac_lh <"mpy", int_hexagon_M2_mpy_nac_lh_s0>; -def Hexagon_M2_mpy_nac_lh_s1: +def HEXAGON_M2_mpy_nac_lh_s1: si_MInst_sisisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_lh_s1>; -def Hexagon_M2_mpy_nac_sat_lh_s1: +def HEXAGON_M2_mpy_nac_sat_lh_s1: si_MInst_sisisi_nac_sat_lh_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s1>; -def Hexagon_M2_mpy_nac_sat_lh_s0: +def HEXAGON_M2_mpy_nac_sat_lh_s0: si_MInst_sisisi_nac_sat_lh <"mpy", int_hexagon_M2_mpy_nac_sat_lh_s0>; -def Hexagon_M2_mpy_nac_ll_s0: +def 
HEXAGON_M2_mpy_nac_ll_s0: si_MInst_sisisi_nac_ll <"mpy", int_hexagon_M2_mpy_nac_ll_s0>; -def Hexagon_M2_mpy_nac_ll_s1: +def HEXAGON_M2_mpy_nac_ll_s1: si_MInst_sisisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_ll_s1>; -def Hexagon_M2_mpy_nac_sat_ll_s1: +def HEXAGON_M2_mpy_nac_sat_ll_s1: si_MInst_sisisi_nac_sat_ll_s1 <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s1>; -def Hexagon_M2_mpy_nac_sat_ll_s0: +def HEXAGON_M2_mpy_nac_sat_ll_s0: si_MInst_sisisi_nac_sat_ll <"mpy", int_hexagon_M2_mpy_nac_sat_ll_s0>; //Rx+=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1] -def Hexagon_M2_mpyd_acc_hh_s0: +def HEXAGON_M2_mpyd_acc_hh_s0: di_MInst_disisi_acc_hh <"mpy", int_hexagon_M2_mpyd_acc_hh_s0>; -def Hexagon_M2_mpyd_acc_hh_s1: +def HEXAGON_M2_mpyd_acc_hh_s1: di_MInst_disisi_acc_hh_s1 <"mpy", int_hexagon_M2_mpyd_acc_hh_s1>; -def Hexagon_M2_mpyd_acc_hl_s0: +def HEXAGON_M2_mpyd_acc_hl_s0: di_MInst_disisi_acc_hl <"mpy", int_hexagon_M2_mpyd_acc_hl_s0>; -def Hexagon_M2_mpyd_acc_hl_s1: +def HEXAGON_M2_mpyd_acc_hl_s1: di_MInst_disisi_acc_hl_s1 <"mpy", int_hexagon_M2_mpyd_acc_hl_s1>; -def Hexagon_M2_mpyd_acc_lh_s0: +def HEXAGON_M2_mpyd_acc_lh_s0: di_MInst_disisi_acc_lh <"mpy", int_hexagon_M2_mpyd_acc_lh_s0>; -def Hexagon_M2_mpyd_acc_lh_s1: +def HEXAGON_M2_mpyd_acc_lh_s1: di_MInst_disisi_acc_lh_s1 <"mpy", int_hexagon_M2_mpyd_acc_lh_s1>; -def Hexagon_M2_mpyd_acc_ll_s0: +def HEXAGON_M2_mpyd_acc_ll_s0: di_MInst_disisi_acc_ll <"mpy", int_hexagon_M2_mpyd_acc_ll_s0>; -def Hexagon_M2_mpyd_acc_ll_s1: +def HEXAGON_M2_mpyd_acc_ll_s1: di_MInst_disisi_acc_ll_s1 <"mpy", int_hexagon_M2_mpyd_acc_ll_s1>; //Rx-=mpy(Rs.[H|L],Rt.[H|L:<<0|:<<1] -def Hexagon_M2_mpyd_nac_hh_s0: +def HEXAGON_M2_mpyd_nac_hh_s0: di_MInst_disisi_nac_hh <"mpy", int_hexagon_M2_mpyd_nac_hh_s0>; -def Hexagon_M2_mpyd_nac_hh_s1: +def HEXAGON_M2_mpyd_nac_hh_s1: di_MInst_disisi_nac_hh_s1 <"mpy", int_hexagon_M2_mpyd_nac_hh_s1>; -def Hexagon_M2_mpyd_nac_hl_s0: +def HEXAGON_M2_mpyd_nac_hl_s0: di_MInst_disisi_nac_hl <"mpy", int_hexagon_M2_mpyd_nac_hl_s0>; -def 
Hexagon_M2_mpyd_nac_hl_s1: +def HEXAGON_M2_mpyd_nac_hl_s1: di_MInst_disisi_nac_hl_s1 <"mpy", int_hexagon_M2_mpyd_nac_hl_s1>; -def Hexagon_M2_mpyd_nac_lh_s0: +def HEXAGON_M2_mpyd_nac_lh_s0: di_MInst_disisi_nac_lh <"mpy", int_hexagon_M2_mpyd_nac_lh_s0>; -def Hexagon_M2_mpyd_nac_lh_s1: +def HEXAGON_M2_mpyd_nac_lh_s1: di_MInst_disisi_nac_lh_s1 <"mpy", int_hexagon_M2_mpyd_nac_lh_s1>; -def Hexagon_M2_mpyd_nac_ll_s0: +def HEXAGON_M2_mpyd_nac_ll_s0: di_MInst_disisi_nac_ll <"mpy", int_hexagon_M2_mpyd_nac_ll_s0>; -def Hexagon_M2_mpyd_nac_ll_s1: +def HEXAGON_M2_mpyd_nac_ll_s1: di_MInst_disisi_nac_ll_s1 <"mpy", int_hexagon_M2_mpyd_nac_ll_s1>; // MTYPE / MPYS / Scalar 16x16 multiply unsigned. //Rd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyu_hh_s0: +def HEXAGON_M2_mpyu_hh_s0: si_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyu_hh_s0>; -def Hexagon_M2_mpyu_hh_s1: +def HEXAGON_M2_mpyu_hh_s1: si_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyu_hh_s1>; -def Hexagon_M2_mpyu_hl_s0: +def HEXAGON_M2_mpyu_hl_s0: si_MInst_sisi_hl <"mpyu", int_hexagon_M2_mpyu_hl_s0>; -def Hexagon_M2_mpyu_hl_s1: +def HEXAGON_M2_mpyu_hl_s1: si_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyu_hl_s1>; -def Hexagon_M2_mpyu_lh_s0: +def HEXAGON_M2_mpyu_lh_s0: si_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyu_lh_s0>; -def Hexagon_M2_mpyu_lh_s1: +def HEXAGON_M2_mpyu_lh_s1: si_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyu_lh_s1>; -def Hexagon_M2_mpyu_ll_s0: +def HEXAGON_M2_mpyu_ll_s0: si_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyu_ll_s0>; -def Hexagon_M2_mpyu_ll_s1: +def HEXAGON_M2_mpyu_ll_s1: si_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyu_ll_s1>; //Rdd=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyud_hh_s0: +def HEXAGON_M2_mpyud_hh_s0: di_MInst_sisi_hh <"mpyu", int_hexagon_M2_mpyud_hh_s0>; -def Hexagon_M2_mpyud_hh_s1: +def HEXAGON_M2_mpyud_hh_s1: di_MInst_sisi_hh_s1 <"mpyu", int_hexagon_M2_mpyud_hh_s1>; -def Hexagon_M2_mpyud_hl_s0: +def HEXAGON_M2_mpyud_hl_s0: di_MInst_sisi_hl <"mpyu", 
int_hexagon_M2_mpyud_hl_s0>; -def Hexagon_M2_mpyud_hl_s1: +def HEXAGON_M2_mpyud_hl_s1: di_MInst_sisi_hl_s1 <"mpyu", int_hexagon_M2_mpyud_hl_s1>; -def Hexagon_M2_mpyud_lh_s0: +def HEXAGON_M2_mpyud_lh_s0: di_MInst_sisi_lh <"mpyu", int_hexagon_M2_mpyud_lh_s0>; -def Hexagon_M2_mpyud_lh_s1: +def HEXAGON_M2_mpyud_lh_s1: di_MInst_sisi_lh_s1 <"mpyu", int_hexagon_M2_mpyud_lh_s1>; -def Hexagon_M2_mpyud_ll_s0: +def HEXAGON_M2_mpyud_ll_s0: di_MInst_sisi_ll <"mpyu", int_hexagon_M2_mpyud_ll_s0>; -def Hexagon_M2_mpyud_ll_s1: +def HEXAGON_M2_mpyud_ll_s1: di_MInst_sisi_ll_s1 <"mpyu", int_hexagon_M2_mpyud_ll_s1>; //Rd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyu_acc_hh_s0: +def HEXAGON_M2_mpyu_acc_hh_s0: si_MInst_sisisi_acc_hh <"mpyu", int_hexagon_M2_mpyu_acc_hh_s0>; -def Hexagon_M2_mpyu_acc_hh_s1: +def HEXAGON_M2_mpyu_acc_hh_s1: si_MInst_sisisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hh_s1>; -def Hexagon_M2_mpyu_acc_hl_s0: +def HEXAGON_M2_mpyu_acc_hl_s0: si_MInst_sisisi_acc_hl <"mpyu", int_hexagon_M2_mpyu_acc_hl_s0>; -def Hexagon_M2_mpyu_acc_hl_s1: +def HEXAGON_M2_mpyu_acc_hl_s1: si_MInst_sisisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyu_acc_hl_s1>; -def Hexagon_M2_mpyu_acc_lh_s0: +def HEXAGON_M2_mpyu_acc_lh_s0: si_MInst_sisisi_acc_lh <"mpyu", int_hexagon_M2_mpyu_acc_lh_s0>; -def Hexagon_M2_mpyu_acc_lh_s1: +def HEXAGON_M2_mpyu_acc_lh_s1: si_MInst_sisisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyu_acc_lh_s1>; -def Hexagon_M2_mpyu_acc_ll_s0: +def HEXAGON_M2_mpyu_acc_ll_s0: si_MInst_sisisi_acc_ll <"mpyu", int_hexagon_M2_mpyu_acc_ll_s0>; -def Hexagon_M2_mpyu_acc_ll_s1: +def HEXAGON_M2_mpyu_acc_ll_s1: si_MInst_sisisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyu_acc_ll_s1>; //Rd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyu_nac_hh_s0: +def HEXAGON_M2_mpyu_nac_hh_s0: si_MInst_sisisi_nac_hh <"mpyu", int_hexagon_M2_mpyu_nac_hh_s0>; -def Hexagon_M2_mpyu_nac_hh_s1: +def HEXAGON_M2_mpyu_nac_hh_s1: si_MInst_sisisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hh_s1>; -def 
Hexagon_M2_mpyu_nac_hl_s0: +def HEXAGON_M2_mpyu_nac_hl_s0: si_MInst_sisisi_nac_hl <"mpyu", int_hexagon_M2_mpyu_nac_hl_s0>; -def Hexagon_M2_mpyu_nac_hl_s1: +def HEXAGON_M2_mpyu_nac_hl_s1: si_MInst_sisisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyu_nac_hl_s1>; -def Hexagon_M2_mpyu_nac_lh_s0: +def HEXAGON_M2_mpyu_nac_lh_s0: si_MInst_sisisi_nac_lh <"mpyu", int_hexagon_M2_mpyu_nac_lh_s0>; -def Hexagon_M2_mpyu_nac_lh_s1: +def HEXAGON_M2_mpyu_nac_lh_s1: si_MInst_sisisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyu_nac_lh_s1>; -def Hexagon_M2_mpyu_nac_ll_s0: +def HEXAGON_M2_mpyu_nac_ll_s0: si_MInst_sisisi_nac_ll <"mpyu", int_hexagon_M2_mpyu_nac_ll_s0>; -def Hexagon_M2_mpyu_nac_ll_s1: +def HEXAGON_M2_mpyu_nac_ll_s1: si_MInst_sisisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyu_nac_ll_s1>; //Rdd+=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyud_acc_hh_s0: +def HEXAGON_M2_mpyud_acc_hh_s0: di_MInst_disisi_acc_hh <"mpyu", int_hexagon_M2_mpyud_acc_hh_s0>; -def Hexagon_M2_mpyud_acc_hh_s1: +def HEXAGON_M2_mpyud_acc_hh_s1: di_MInst_disisi_acc_hh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hh_s1>; -def Hexagon_M2_mpyud_acc_hl_s0: +def HEXAGON_M2_mpyud_acc_hl_s0: di_MInst_disisi_acc_hl <"mpyu", int_hexagon_M2_mpyud_acc_hl_s0>; -def Hexagon_M2_mpyud_acc_hl_s1: +def HEXAGON_M2_mpyud_acc_hl_s1: di_MInst_disisi_acc_hl_s1 <"mpyu", int_hexagon_M2_mpyud_acc_hl_s1>; -def Hexagon_M2_mpyud_acc_lh_s0: +def HEXAGON_M2_mpyud_acc_lh_s0: di_MInst_disisi_acc_lh <"mpyu", int_hexagon_M2_mpyud_acc_lh_s0>; -def Hexagon_M2_mpyud_acc_lh_s1: +def HEXAGON_M2_mpyud_acc_lh_s1: di_MInst_disisi_acc_lh_s1 <"mpyu", int_hexagon_M2_mpyud_acc_lh_s1>; -def Hexagon_M2_mpyud_acc_ll_s0: +def HEXAGON_M2_mpyud_acc_ll_s0: di_MInst_disisi_acc_ll <"mpyu", int_hexagon_M2_mpyud_acc_ll_s0>; -def Hexagon_M2_mpyud_acc_ll_s1: +def HEXAGON_M2_mpyud_acc_ll_s1: di_MInst_disisi_acc_ll_s1 <"mpyu", int_hexagon_M2_mpyud_acc_ll_s1>; //Rdd-=mpyu(Rs.[H|L],Rt.[H|L])[:<<0|:<<1] -def Hexagon_M2_mpyud_nac_hh_s0: +def HEXAGON_M2_mpyud_nac_hh_s0: 
di_MInst_disisi_nac_hh <"mpyu", int_hexagon_M2_mpyud_nac_hh_s0>; -def Hexagon_M2_mpyud_nac_hh_s1: +def HEXAGON_M2_mpyud_nac_hh_s1: di_MInst_disisi_nac_hh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hh_s1>; -def Hexagon_M2_mpyud_nac_hl_s0: +def HEXAGON_M2_mpyud_nac_hl_s0: di_MInst_disisi_nac_hl <"mpyu", int_hexagon_M2_mpyud_nac_hl_s0>; -def Hexagon_M2_mpyud_nac_hl_s1: +def HEXAGON_M2_mpyud_nac_hl_s1: di_MInst_disisi_nac_hl_s1 <"mpyu", int_hexagon_M2_mpyud_nac_hl_s1>; -def Hexagon_M2_mpyud_nac_lh_s0: +def HEXAGON_M2_mpyud_nac_lh_s0: di_MInst_disisi_nac_lh <"mpyu", int_hexagon_M2_mpyud_nac_lh_s0>; -def Hexagon_M2_mpyud_nac_lh_s1: +def HEXAGON_M2_mpyud_nac_lh_s1: di_MInst_disisi_nac_lh_s1 <"mpyu", int_hexagon_M2_mpyud_nac_lh_s1>; -def Hexagon_M2_mpyud_nac_ll_s0: +def HEXAGON_M2_mpyud_nac_ll_s0: di_MInst_disisi_nac_ll <"mpyu", int_hexagon_M2_mpyud_nac_ll_s0>; -def Hexagon_M2_mpyud_nac_ll_s1: +def HEXAGON_M2_mpyud_nac_ll_s1: di_MInst_disisi_nac_ll_s1 <"mpyu", int_hexagon_M2_mpyud_nac_ll_s1>; @@ -2864,15 +2900,15 @@ def Hexagon_M2_mpyud_nac_ll_s1: *********************************************************************/ // MTYPE / VB / Vector reduce add unsigned bytes. -def Hexagon_A2_vraddub: +def HEXAGON_A2_vraddub: di_MInst_didi <"vraddub", int_hexagon_A2_vraddub>; -def Hexagon_A2_vraddub_acc: +def HEXAGON_A2_vraddub_acc: di_MInst_dididi_acc <"vraddub", int_hexagon_A2_vraddub_acc>; // MTYPE / VB / Vector sum of absolute differences unsigned bytes. -def Hexagon_A2_vrsadub: +def HEXAGON_A2_vrsadub: di_MInst_didi <"vrsadub", int_hexagon_A2_vrsadub>; -def Hexagon_A2_vrsadub_acc: +def HEXAGON_A2_vrsadub_acc: di_MInst_dididi_acc <"vrsadub", int_hexagon_A2_vrsadub_acc>; /******************************************************************** @@ -2880,56 +2916,56 @@ def Hexagon_A2_vrsadub_acc: *********************************************************************/ // MTYPE / VH / Vector dual multiply. 
-def Hexagon_M2_vdmpys_s1: +def HEXAGON_M2_vdmpys_s1: di_MInst_didi_s1_sat <"vdmpy", int_hexagon_M2_vdmpys_s1>; -def Hexagon_M2_vdmpys_s0: +def HEXAGON_M2_vdmpys_s0: di_MInst_didi_sat <"vdmpy", int_hexagon_M2_vdmpys_s0>; -def Hexagon_M2_vdmacs_s1: +def HEXAGON_M2_vdmacs_s1: di_MInst_dididi_acc_s1_sat <"vdmpy", int_hexagon_M2_vdmacs_s1>; -def Hexagon_M2_vdmacs_s0: +def HEXAGON_M2_vdmacs_s0: di_MInst_dididi_acc_sat <"vdmpy", int_hexagon_M2_vdmacs_s0>; // MTYPE / VH / Vector dual multiply with round and pack. -def Hexagon_M2_vdmpyrs_s0: +def HEXAGON_M2_vdmpyrs_s0: si_MInst_didi_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s0>; -def Hexagon_M2_vdmpyrs_s1: +def HEXAGON_M2_vdmpyrs_s1: si_MInst_didi_s1_rnd_sat <"vdmpy", int_hexagon_M2_vdmpyrs_s1>; // MTYPE / VH / Vector multiply even halfwords. -def Hexagon_M2_vmpy2es_s1: +def HEXAGON_M2_vmpy2es_s1: di_MInst_didi_s1_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s1>; -def Hexagon_M2_vmpy2es_s0: +def HEXAGON_M2_vmpy2es_s0: di_MInst_didi_sat <"vmpyeh", int_hexagon_M2_vmpy2es_s0>; -def Hexagon_M2_vmac2es: +def HEXAGON_M2_vmac2es: di_MInst_dididi_acc <"vmpyeh", int_hexagon_M2_vmac2es>; -def Hexagon_M2_vmac2es_s1: +def HEXAGON_M2_vmac2es_s1: di_MInst_dididi_acc_s1_sat <"vmpyeh", int_hexagon_M2_vmac2es_s1>; -def Hexagon_M2_vmac2es_s0: +def HEXAGON_M2_vmac2es_s0: di_MInst_dididi_acc_sat <"vmpyeh", int_hexagon_M2_vmac2es_s0>; // MTYPE / VH / Vector multiply halfwords. 
-def Hexagon_M2_vmpy2s_s0: +def HEXAGON_M2_vmpy2s_s0: di_MInst_sisi_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0>; -def Hexagon_M2_vmpy2s_s1: +def HEXAGON_M2_vmpy2s_s1: di_MInst_sisi_s1_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1>; -def Hexagon_M2_vmac2: +def HEXAGON_M2_vmac2: di_MInst_disisi_acc <"vmpyh", int_hexagon_M2_vmac2>; -def Hexagon_M2_vmac2s_s0: +def HEXAGON_M2_vmac2s_s0: di_MInst_disisi_acc_sat <"vmpyh", int_hexagon_M2_vmac2s_s0>; -def Hexagon_M2_vmac2s_s1: +def HEXAGON_M2_vmac2s_s1: di_MInst_disisi_acc_s1_sat <"vmpyh", int_hexagon_M2_vmac2s_s1>; // MTYPE / VH / Vector multiply halfwords with round and pack. -def Hexagon_M2_vmpy2s_s0pack: +def HEXAGON_M2_vmpy2s_s0pack: si_MInst_sisi_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s0pack>; -def Hexagon_M2_vmpy2s_s1pack: +def HEXAGON_M2_vmpy2s_s1pack: si_MInst_sisi_s1_rnd_sat <"vmpyh", int_hexagon_M2_vmpy2s_s1pack>; // MTYPE / VH / Vector reduce multiply halfwords. // Rxx32+=vrmpyh(Rss32,Rtt32) -def Hexagon_M2_vrmpy_s0: +def HEXAGON_M2_vrmpy_s0: di_MInst_didi <"vrmpyh", int_hexagon_M2_vrmpy_s0>; -def Hexagon_M2_vrmac_s0: +def HEXAGON_M2_vrmac_s0: di_MInst_dididi_acc <"vrmpyh", int_hexagon_M2_vrmac_s0>; @@ -2938,25 +2974,25 @@ def Hexagon_M2_vrmac_s0: *********************************************************************/ // STYPE / ALU / Absolute value. -def Hexagon_A2_abs: +def HEXAGON_A2_abs: si_SInst_si <"abs", int_hexagon_A2_abs>; -def Hexagon_A2_absp: +def HEXAGON_A2_absp: di_SInst_di <"abs", int_hexagon_A2_absp>; -def Hexagon_A2_abssat: +def HEXAGON_A2_abssat: si_SInst_si_sat <"abs", int_hexagon_A2_abssat>; // STYPE / ALU / Negate. -def Hexagon_A2_negp: +def HEXAGON_A2_negp: di_SInst_di <"neg", int_hexagon_A2_negp>; -def Hexagon_A2_negsat: +def HEXAGON_A2_negsat: si_SInst_si_sat <"neg", int_hexagon_A2_negsat>; // STYPE / ALU / Logical Not. -def Hexagon_A2_notp: +def HEXAGON_A2_notp: di_SInst_di <"not", int_hexagon_A2_notp>; // STYPE / ALU / Sign extend word to doubleword. 
-def Hexagon_A2_sxtw: +def HEXAGON_A2_sxtw: di_SInst_si <"sxtw", int_hexagon_A2_sxtw>; @@ -2965,88 +3001,88 @@ def Hexagon_A2_sxtw: *********************************************************************/ // STYPE / BIT / Count leading. -def Hexagon_S2_cl0: +def HEXAGON_S2_cl0: si_SInst_si <"cl0", int_hexagon_S2_cl0>; -def Hexagon_S2_cl0p: +def HEXAGON_S2_cl0p: si_SInst_di <"cl0", int_hexagon_S2_cl0p>; -def Hexagon_S2_cl1: +def HEXAGON_S2_cl1: si_SInst_si <"cl1", int_hexagon_S2_cl1>; -def Hexagon_S2_cl1p: +def HEXAGON_S2_cl1p: si_SInst_di <"cl1", int_hexagon_S2_cl1p>; -def Hexagon_S2_clb: +def HEXAGON_S2_clb: si_SInst_si <"clb", int_hexagon_S2_clb>; -def Hexagon_S2_clbp: +def HEXAGON_S2_clbp: si_SInst_di <"clb", int_hexagon_S2_clbp>; -def Hexagon_S2_clbnorm: +def HEXAGON_S2_clbnorm: si_SInst_si <"normamt", int_hexagon_S2_clbnorm>; // STYPE / BIT / Count trailing. -def Hexagon_S2_ct0: +def HEXAGON_S2_ct0: si_SInst_si <"ct0", int_hexagon_S2_ct0>; -def Hexagon_S2_ct1: +def HEXAGON_S2_ct1: si_SInst_si <"ct1", int_hexagon_S2_ct1>; // STYPE / BIT / Compare bit mask. -def HEXAGON_C2_bitsclr: +def Hexagon_C2_bitsclr: qi_SInst_sisi <"bitsclr", int_hexagon_C2_bitsclr>; -def HEXAGON_C2_bitsclri: +def Hexagon_C2_bitsclri: qi_SInst_siu6 <"bitsclr", int_hexagon_C2_bitsclri>; -def HEXAGON_C2_bitsset: +def Hexagon_C2_bitsset: qi_SInst_sisi <"bitsset", int_hexagon_C2_bitsset>; // STYPE / BIT / Extract unsigned. // Rd[d][32/64]=extractu(Rs[s],Rt[t],[imm]) -def Hexagon_S2_extractu: +def HEXAGON_S2_extractu: si_SInst_siu5u5 <"extractu",int_hexagon_S2_extractu>; -def Hexagon_S2_extractu_rp: +def HEXAGON_S2_extractu_rp: si_SInst_sidi <"extractu",int_hexagon_S2_extractu_rp>; -def Hexagon_S2_extractup: +def HEXAGON_S2_extractup: di_SInst_diu6u6 <"extractu",int_hexagon_S2_extractup>; -def Hexagon_S2_extractup_rp: +def HEXAGON_S2_extractup_rp: di_SInst_didi <"extractu",int_hexagon_S2_extractup_rp>; // STYPE / BIT / Insert bitfield. 
-def HEXAGON_S2_insert: +def Hexagon_S2_insert: si_SInst_sisiu5u5 <"insert", int_hexagon_S2_insert>; -def HEXAGON_S2_insert_rp: +def Hexagon_S2_insert_rp: si_SInst_sisidi <"insert", int_hexagon_S2_insert_rp>; -def HEXAGON_S2_insertp: +def Hexagon_S2_insertp: di_SInst_didiu6u6 <"insert", int_hexagon_S2_insertp>; -def HEXAGON_S2_insertp_rp: +def Hexagon_S2_insertp_rp: di_SInst_dididi <"insert", int_hexagon_S2_insertp_rp>; // STYPE / BIT / Innterleave/deinterleave. -def HEXAGON_S2_interleave: +def Hexagon_S2_interleave: di_SInst_di <"interleave", int_hexagon_S2_interleave>; -def HEXAGON_S2_deinterleave: +def Hexagon_S2_deinterleave: di_SInst_di <"deinterleave", int_hexagon_S2_deinterleave>; // STYPE / BIT / Linear feedback-shift Iteration. -def HEXAGON_S2_lfsp: +def Hexagon_S2_lfsp: di_SInst_didi <"lfs", int_hexagon_S2_lfsp>; // STYPE / BIT / Bit reverse. -def HEXAGON_S2_brev: +def Hexagon_S2_brev: si_SInst_si <"brev", int_hexagon_S2_brev>; // STYPE / BIT / Set/Clear/Toggle Bit. -def Hexagon_S2_setbit_i: +def HEXAGON_S2_setbit_i: si_SInst_siu5 <"setbit", int_hexagon_S2_setbit_i>; -def Hexagon_S2_togglebit_i: +def HEXAGON_S2_togglebit_i: si_SInst_siu5 <"togglebit", int_hexagon_S2_togglebit_i>; -def Hexagon_S2_clrbit_i: +def HEXAGON_S2_clrbit_i: si_SInst_siu5 <"clrbit", int_hexagon_S2_clrbit_i>; -def Hexagon_S2_setbit_r: +def HEXAGON_S2_setbit_r: si_SInst_sisi <"setbit", int_hexagon_S2_setbit_r>; -def Hexagon_S2_togglebit_r: +def HEXAGON_S2_togglebit_r: si_SInst_sisi <"togglebit", int_hexagon_S2_togglebit_r>; -def Hexagon_S2_clrbit_r: +def HEXAGON_S2_clrbit_r: si_SInst_sisi <"clrbit", int_hexagon_S2_clrbit_r>; // STYPE / BIT / Test Bit. 
-def Hexagon_S2_tstbit_i: +def HEXAGON_S2_tstbit_i: qi_SInst_siu5 <"tstbit", int_hexagon_S2_tstbit_i>; -def Hexagon_S2_tstbit_r: +def HEXAGON_S2_tstbit_r: qi_SInst_sisi <"tstbit", int_hexagon_S2_tstbit_r>; @@ -3055,11 +3091,11 @@ def Hexagon_S2_tstbit_r: *********************************************************************/ // STYPE / COMPLEX / Vector Complex conjugate. -def Hexagon_A2_vconj: +def HEXAGON_A2_vconj: di_SInst_di_sat <"vconj", int_hexagon_A2_vconj>; // STYPE / COMPLEX / Vector Complex rotate. -def Hexagon_S2_vcrotate: +def HEXAGON_S2_vcrotate: di_SInst_disi <"vcrotate",int_hexagon_S2_vcrotate>; @@ -3068,102 +3104,102 @@ def Hexagon_S2_vcrotate: *********************************************************************/ // STYPE / PERM / Saturate. -def Hexagon_A2_sat: +def HEXAGON_A2_sat: si_SInst_di <"sat", int_hexagon_A2_sat>; -def Hexagon_A2_satb: +def HEXAGON_A2_satb: si_SInst_si <"satb", int_hexagon_A2_satb>; -def Hexagon_A2_sath: +def HEXAGON_A2_sath: si_SInst_si <"sath", int_hexagon_A2_sath>; -def Hexagon_A2_satub: +def HEXAGON_A2_satub: si_SInst_si <"satub", int_hexagon_A2_satub>; -def Hexagon_A2_satuh: +def HEXAGON_A2_satuh: si_SInst_si <"satuh", int_hexagon_A2_satuh>; // STYPE / PERM / Swizzle bytes. -def Hexagon_A2_swiz: +def HEXAGON_A2_swiz: si_SInst_si <"swiz", int_hexagon_A2_swiz>; // STYPE / PERM / Vector align. // Need custom lowering -def Hexagon_S2_valignib: +def HEXAGON_S2_valignib: di_SInst_didiu3 <"valignb", int_hexagon_S2_valignib>; -def Hexagon_S2_valignrb: +def HEXAGON_S2_valignrb: di_SInst_didiqi <"valignb", int_hexagon_S2_valignrb>; // STYPE / PERM / Vector round and pack. -def Hexagon_S2_vrndpackwh: +def HEXAGON_S2_vrndpackwh: si_SInst_di <"vrndwh", int_hexagon_S2_vrndpackwh>; -def Hexagon_S2_vrndpackwhs: +def HEXAGON_S2_vrndpackwhs: si_SInst_di_sat <"vrndwh", int_hexagon_S2_vrndpackwhs>; // STYPE / PERM / Vector saturate and pack. 
-def Hexagon_S2_svsathb: +def HEXAGON_S2_svsathb: si_SInst_si <"vsathb", int_hexagon_S2_svsathb>; -def Hexagon_S2_vsathb: +def HEXAGON_S2_vsathb: si_SInst_di <"vsathb", int_hexagon_S2_vsathb>; -def Hexagon_S2_svsathub: +def HEXAGON_S2_svsathub: si_SInst_si <"vsathub", int_hexagon_S2_svsathub>; -def Hexagon_S2_vsathub: +def HEXAGON_S2_vsathub: si_SInst_di <"vsathub", int_hexagon_S2_vsathub>; -def Hexagon_S2_vsatwh: +def HEXAGON_S2_vsatwh: si_SInst_di <"vsatwh", int_hexagon_S2_vsatwh>; -def Hexagon_S2_vsatwuh: +def HEXAGON_S2_vsatwuh: si_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh>; // STYPE / PERM / Vector saturate without pack. -def Hexagon_S2_vsathb_nopack: +def HEXAGON_S2_vsathb_nopack: di_SInst_di <"vsathb", int_hexagon_S2_vsathb_nopack>; -def Hexagon_S2_vsathub_nopack: +def HEXAGON_S2_vsathub_nopack: di_SInst_di <"vsathub", int_hexagon_S2_vsathub_nopack>; -def Hexagon_S2_vsatwh_nopack: +def HEXAGON_S2_vsatwh_nopack: di_SInst_di <"vsatwh", int_hexagon_S2_vsatwh_nopack>; -def Hexagon_S2_vsatwuh_nopack: +def HEXAGON_S2_vsatwuh_nopack: di_SInst_di <"vsatwuh", int_hexagon_S2_vsatwuh_nopack>; // STYPE / PERM / Vector shuffle. -def Hexagon_S2_shuffeb: +def HEXAGON_S2_shuffeb: di_SInst_didi <"shuffeb", int_hexagon_S2_shuffeb>; -def Hexagon_S2_shuffeh: +def HEXAGON_S2_shuffeh: di_SInst_didi <"shuffeh", int_hexagon_S2_shuffeh>; -def Hexagon_S2_shuffob: +def HEXAGON_S2_shuffob: di_SInst_didi <"shuffob", int_hexagon_S2_shuffob>; -def Hexagon_S2_shuffoh: +def HEXAGON_S2_shuffoh: di_SInst_didi <"shuffoh", int_hexagon_S2_shuffoh>; // STYPE / PERM / Vector splat bytes. -def Hexagon_S2_vsplatrb: +def HEXAGON_S2_vsplatrb: si_SInst_si <"vsplatb", int_hexagon_S2_vsplatrb>; // STYPE / PERM / Vector splat halfwords. -def Hexagon_S2_vsplatrh: +def HEXAGON_S2_vsplatrh: di_SInst_si <"vsplath", int_hexagon_S2_vsplatrh>; // STYPE / PERM / Vector splice. 
-def HEXAGON_S2_vsplicerb: +def Hexagon_S2_vsplicerb: di_SInst_didiqi <"vspliceb",int_hexagon_S2_vsplicerb>; -def HEXAGON_S2_vspliceib: +def Hexagon_S2_vspliceib: di_SInst_didiu3 <"vspliceb",int_hexagon_S2_vspliceib>; // STYPE / PERM / Sign extend. -def Hexagon_S2_vsxtbh: +def HEXAGON_S2_vsxtbh: di_SInst_si <"vsxtbh", int_hexagon_S2_vsxtbh>; -def Hexagon_S2_vsxthw: +def HEXAGON_S2_vsxthw: di_SInst_si <"vsxthw", int_hexagon_S2_vsxthw>; // STYPE / PERM / Truncate. -def Hexagon_S2_vtrunehb: +def HEXAGON_S2_vtrunehb: si_SInst_di <"vtrunehb",int_hexagon_S2_vtrunehb>; -def Hexagon_S2_vtrunohb: +def HEXAGON_S2_vtrunohb: si_SInst_di <"vtrunohb",int_hexagon_S2_vtrunohb>; -def Hexagon_S2_vtrunewh: +def HEXAGON_S2_vtrunewh: di_SInst_didi <"vtrunewh",int_hexagon_S2_vtrunewh>; -def Hexagon_S2_vtrunowh: +def HEXAGON_S2_vtrunowh: di_SInst_didi <"vtrunowh",int_hexagon_S2_vtrunowh>; // STYPE / PERM / Zero extend. -def Hexagon_S2_vzxtbh: +def HEXAGON_S2_vzxtbh: di_SInst_si <"vzxtbh", int_hexagon_S2_vzxtbh>; -def Hexagon_S2_vzxthw: +def HEXAGON_S2_vzxthw: di_SInst_si <"vzxthw", int_hexagon_S2_vzxthw>; @@ -3172,17 +3208,17 @@ def Hexagon_S2_vzxthw: *********************************************************************/ // STYPE / PRED / Mask generate from predicate. -def Hexagon_C2_mask: +def HEXAGON_C2_mask: di_SInst_qi <"mask", int_hexagon_C2_mask>; // STYPE / PRED / Predicate transfer. -def Hexagon_C2_tfrpr: +def HEXAGON_C2_tfrpr: si_SInst_qi <"", int_hexagon_C2_tfrpr>; -def Hexagon_C2_tfrrp: +def HEXAGON_C2_tfrrp: qi_SInst_si <"", int_hexagon_C2_tfrrp>; // STYPE / PRED / Viterbi pack even and odd predicate bits. -def Hexagon_C2_vitpack: +def HEXAGON_C2_vitpack: si_SInst_qiqi <"vitpack",int_hexagon_C2_vitpack>; @@ -3191,202 +3227,202 @@ def Hexagon_C2_vitpack: *********************************************************************/ // STYPE / SHIFT / Shift by immediate. 
-def Hexagon_S2_asl_i_r: +def HEXAGON_S2_asl_i_r: si_SInst_siu5 <"asl", int_hexagon_S2_asl_i_r>; -def Hexagon_S2_asr_i_r: +def HEXAGON_S2_asr_i_r: si_SInst_siu5 <"asr", int_hexagon_S2_asr_i_r>; -def Hexagon_S2_lsr_i_r: +def HEXAGON_S2_lsr_i_r: si_SInst_siu5 <"lsr", int_hexagon_S2_lsr_i_r>; -def Hexagon_S2_asl_i_p: +def HEXAGON_S2_asl_i_p: di_SInst_diu6 <"asl", int_hexagon_S2_asl_i_p>; -def Hexagon_S2_asr_i_p: +def HEXAGON_S2_asr_i_p: di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p>; -def Hexagon_S2_lsr_i_p: +def HEXAGON_S2_lsr_i_p: di_SInst_diu6 <"lsr", int_hexagon_S2_lsr_i_p>; // STYPE / SHIFT / Shift by immediate and accumulate. -def Hexagon_S2_asl_i_r_acc: +def HEXAGON_S2_asl_i_r_acc: si_SInst_sisiu5_acc <"asl", int_hexagon_S2_asl_i_r_acc>; -def Hexagon_S2_asr_i_r_acc: +def HEXAGON_S2_asr_i_r_acc: si_SInst_sisiu5_acc <"asr", int_hexagon_S2_asr_i_r_acc>; -def Hexagon_S2_lsr_i_r_acc: +def HEXAGON_S2_lsr_i_r_acc: si_SInst_sisiu5_acc <"lsr", int_hexagon_S2_lsr_i_r_acc>; -def Hexagon_S2_asl_i_r_nac: +def HEXAGON_S2_asl_i_r_nac: si_SInst_sisiu5_nac <"asl", int_hexagon_S2_asl_i_r_nac>; -def Hexagon_S2_asr_i_r_nac: +def HEXAGON_S2_asr_i_r_nac: si_SInst_sisiu5_nac <"asr", int_hexagon_S2_asr_i_r_nac>; -def Hexagon_S2_lsr_i_r_nac: +def HEXAGON_S2_lsr_i_r_nac: si_SInst_sisiu5_nac <"lsr", int_hexagon_S2_lsr_i_r_nac>; -def Hexagon_S2_asl_i_p_acc: +def HEXAGON_S2_asl_i_p_acc: di_SInst_didiu6_acc <"asl", int_hexagon_S2_asl_i_p_acc>; -def Hexagon_S2_asr_i_p_acc: +def HEXAGON_S2_asr_i_p_acc: di_SInst_didiu6_acc <"asr", int_hexagon_S2_asr_i_p_acc>; -def Hexagon_S2_lsr_i_p_acc: +def HEXAGON_S2_lsr_i_p_acc: di_SInst_didiu6_acc <"lsr", int_hexagon_S2_lsr_i_p_acc>; -def Hexagon_S2_asl_i_p_nac: +def HEXAGON_S2_asl_i_p_nac: di_SInst_didiu6_nac <"asl", int_hexagon_S2_asl_i_p_nac>; -def Hexagon_S2_asr_i_p_nac: +def HEXAGON_S2_asr_i_p_nac: di_SInst_didiu6_nac <"asr", int_hexagon_S2_asr_i_p_nac>; -def Hexagon_S2_lsr_i_p_nac: +def HEXAGON_S2_lsr_i_p_nac: di_SInst_didiu6_nac <"lsr", 
int_hexagon_S2_lsr_i_p_nac>; // STYPE / SHIFT / Shift by immediate and add. -def Hexagon_S2_addasl_rrri: +def HEXAGON_S2_addasl_rrri: si_SInst_sisiu3 <"addasl", int_hexagon_S2_addasl_rrri>; // STYPE / SHIFT / Shift by immediate and logical. -def Hexagon_S2_asl_i_r_and: +def HEXAGON_S2_asl_i_r_and: si_SInst_sisiu5_and <"asl", int_hexagon_S2_asl_i_r_and>; -def Hexagon_S2_asr_i_r_and: +def HEXAGON_S2_asr_i_r_and: si_SInst_sisiu5_and <"asr", int_hexagon_S2_asr_i_r_and>; -def Hexagon_S2_lsr_i_r_and: +def HEXAGON_S2_lsr_i_r_and: si_SInst_sisiu5_and <"lsr", int_hexagon_S2_lsr_i_r_and>; -def Hexagon_S2_asl_i_r_xacc: +def HEXAGON_S2_asl_i_r_xacc: si_SInst_sisiu5_xor <"asl", int_hexagon_S2_asl_i_r_xacc>; -def Hexagon_S2_lsr_i_r_xacc: +def HEXAGON_S2_lsr_i_r_xacc: si_SInst_sisiu5_xor <"lsr", int_hexagon_S2_lsr_i_r_xacc>; -def Hexagon_S2_asl_i_r_or: +def HEXAGON_S2_asl_i_r_or: si_SInst_sisiu5_or <"asl", int_hexagon_S2_asl_i_r_or>; -def Hexagon_S2_asr_i_r_or: +def HEXAGON_S2_asr_i_r_or: si_SInst_sisiu5_or <"asr", int_hexagon_S2_asr_i_r_or>; -def Hexagon_S2_lsr_i_r_or: +def HEXAGON_S2_lsr_i_r_or: si_SInst_sisiu5_or <"lsr", int_hexagon_S2_lsr_i_r_or>; -def Hexagon_S2_asl_i_p_and: +def HEXAGON_S2_asl_i_p_and: di_SInst_didiu6_and <"asl", int_hexagon_S2_asl_i_p_and>; -def Hexagon_S2_asr_i_p_and: +def HEXAGON_S2_asr_i_p_and: di_SInst_didiu6_and <"asr", int_hexagon_S2_asr_i_p_and>; -def Hexagon_S2_lsr_i_p_and: +def HEXAGON_S2_lsr_i_p_and: di_SInst_didiu6_and <"lsr", int_hexagon_S2_lsr_i_p_and>; -def Hexagon_S2_asl_i_p_xacc: +def HEXAGON_S2_asl_i_p_xacc: di_SInst_didiu6_xor <"asl", int_hexagon_S2_asl_i_p_xacc>; -def Hexagon_S2_lsr_i_p_xacc: +def HEXAGON_S2_lsr_i_p_xacc: di_SInst_didiu6_xor <"lsr", int_hexagon_S2_lsr_i_p_xacc>; -def Hexagon_S2_asl_i_p_or: +def HEXAGON_S2_asl_i_p_or: di_SInst_didiu6_or <"asl", int_hexagon_S2_asl_i_p_or>; -def Hexagon_S2_asr_i_p_or: +def HEXAGON_S2_asr_i_p_or: di_SInst_didiu6_or <"asr", int_hexagon_S2_asr_i_p_or>; -def Hexagon_S2_lsr_i_p_or: +def 
HEXAGON_S2_lsr_i_p_or: di_SInst_didiu6_or <"lsr", int_hexagon_S2_lsr_i_p_or>; // STYPE / SHIFT / Shift right by immediate with rounding. -def Hexagon_S2_asr_i_r_rnd: +def HEXAGON_S2_asr_i_r_rnd: si_SInst_siu5_rnd <"asr", int_hexagon_S2_asr_i_r_rnd>; -def Hexagon_S2_asr_i_r_rnd_goodsyntax: +def HEXAGON_S2_asr_i_r_rnd_goodsyntax: si_SInst_siu5 <"asrrnd", int_hexagon_S2_asr_i_r_rnd_goodsyntax>; // STYPE / SHIFT / Shift left by immediate with saturation. -def Hexagon_S2_asl_i_r_sat: +def HEXAGON_S2_asl_i_r_sat: si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_i_r_sat>; // STYPE / SHIFT / Shift by register. -def Hexagon_S2_asl_r_r: +def HEXAGON_S2_asl_r_r: si_SInst_sisi <"asl", int_hexagon_S2_asl_r_r>; -def Hexagon_S2_asr_r_r: +def HEXAGON_S2_asr_r_r: si_SInst_sisi <"asr", int_hexagon_S2_asr_r_r>; -def Hexagon_S2_lsl_r_r: +def HEXAGON_S2_lsl_r_r: si_SInst_sisi <"lsl", int_hexagon_S2_lsl_r_r>; -def Hexagon_S2_lsr_r_r: +def HEXAGON_S2_lsr_r_r: si_SInst_sisi <"lsr", int_hexagon_S2_lsr_r_r>; -def Hexagon_S2_asl_r_p: +def HEXAGON_S2_asl_r_p: di_SInst_disi <"asl", int_hexagon_S2_asl_r_p>; -def Hexagon_S2_asr_r_p: +def HEXAGON_S2_asr_r_p: di_SInst_disi <"asr", int_hexagon_S2_asr_r_p>; -def Hexagon_S2_lsl_r_p: +def HEXAGON_S2_lsl_r_p: di_SInst_disi <"lsl", int_hexagon_S2_lsl_r_p>; -def Hexagon_S2_lsr_r_p: +def HEXAGON_S2_lsr_r_p: di_SInst_disi <"lsr", int_hexagon_S2_lsr_r_p>; // STYPE / SHIFT / Shift by register and accumulate. 
-def Hexagon_S2_asl_r_r_acc: +def HEXAGON_S2_asl_r_r_acc: si_SInst_sisisi_acc <"asl", int_hexagon_S2_asl_r_r_acc>; -def Hexagon_S2_asr_r_r_acc: +def HEXAGON_S2_asr_r_r_acc: si_SInst_sisisi_acc <"asr", int_hexagon_S2_asr_r_r_acc>; -def Hexagon_S2_lsl_r_r_acc: +def HEXAGON_S2_lsl_r_r_acc: si_SInst_sisisi_acc <"lsl", int_hexagon_S2_lsl_r_r_acc>; -def Hexagon_S2_lsr_r_r_acc: +def HEXAGON_S2_lsr_r_r_acc: si_SInst_sisisi_acc <"lsr", int_hexagon_S2_lsr_r_r_acc>; -def Hexagon_S2_asl_r_p_acc: +def HEXAGON_S2_asl_r_p_acc: di_SInst_didisi_acc <"asl", int_hexagon_S2_asl_r_p_acc>; -def Hexagon_S2_asr_r_p_acc: +def HEXAGON_S2_asr_r_p_acc: di_SInst_didisi_acc <"asr", int_hexagon_S2_asr_r_p_acc>; -def Hexagon_S2_lsl_r_p_acc: +def HEXAGON_S2_lsl_r_p_acc: di_SInst_didisi_acc <"lsl", int_hexagon_S2_lsl_r_p_acc>; -def Hexagon_S2_lsr_r_p_acc: +def HEXAGON_S2_lsr_r_p_acc: di_SInst_didisi_acc <"lsr", int_hexagon_S2_lsr_r_p_acc>; -def Hexagon_S2_asl_r_r_nac: +def HEXAGON_S2_asl_r_r_nac: si_SInst_sisisi_nac <"asl", int_hexagon_S2_asl_r_r_nac>; -def Hexagon_S2_asr_r_r_nac: +def HEXAGON_S2_asr_r_r_nac: si_SInst_sisisi_nac <"asr", int_hexagon_S2_asr_r_r_nac>; -def Hexagon_S2_lsl_r_r_nac: +def HEXAGON_S2_lsl_r_r_nac: si_SInst_sisisi_nac <"lsl", int_hexagon_S2_lsl_r_r_nac>; -def Hexagon_S2_lsr_r_r_nac: +def HEXAGON_S2_lsr_r_r_nac: si_SInst_sisisi_nac <"lsr", int_hexagon_S2_lsr_r_r_nac>; -def Hexagon_S2_asl_r_p_nac: +def HEXAGON_S2_asl_r_p_nac: di_SInst_didisi_nac <"asl", int_hexagon_S2_asl_r_p_nac>; -def Hexagon_S2_asr_r_p_nac: +def HEXAGON_S2_asr_r_p_nac: di_SInst_didisi_nac <"asr", int_hexagon_S2_asr_r_p_nac>; -def Hexagon_S2_lsl_r_p_nac: +def HEXAGON_S2_lsl_r_p_nac: di_SInst_didisi_nac <"lsl", int_hexagon_S2_lsl_r_p_nac>; -def Hexagon_S2_lsr_r_p_nac: +def HEXAGON_S2_lsr_r_p_nac: di_SInst_didisi_nac <"lsr", int_hexagon_S2_lsr_r_p_nac>; // STYPE / SHIFT / Shift by register and logical. 
-def Hexagon_S2_asl_r_r_and: +def HEXAGON_S2_asl_r_r_and: si_SInst_sisisi_and <"asl", int_hexagon_S2_asl_r_r_and>; -def Hexagon_S2_asr_r_r_and: +def HEXAGON_S2_asr_r_r_and: si_SInst_sisisi_and <"asr", int_hexagon_S2_asr_r_r_and>; -def Hexagon_S2_lsl_r_r_and: +def HEXAGON_S2_lsl_r_r_and: si_SInst_sisisi_and <"lsl", int_hexagon_S2_lsl_r_r_and>; -def Hexagon_S2_lsr_r_r_and: +def HEXAGON_S2_lsr_r_r_and: si_SInst_sisisi_and <"lsr", int_hexagon_S2_lsr_r_r_and>; -def Hexagon_S2_asl_r_r_or: +def HEXAGON_S2_asl_r_r_or: si_SInst_sisisi_or <"asl", int_hexagon_S2_asl_r_r_or>; -def Hexagon_S2_asr_r_r_or: +def HEXAGON_S2_asr_r_r_or: si_SInst_sisisi_or <"asr", int_hexagon_S2_asr_r_r_or>; -def Hexagon_S2_lsl_r_r_or: +def HEXAGON_S2_lsl_r_r_or: si_SInst_sisisi_or <"lsl", int_hexagon_S2_lsl_r_r_or>; -def Hexagon_S2_lsr_r_r_or: +def HEXAGON_S2_lsr_r_r_or: si_SInst_sisisi_or <"lsr", int_hexagon_S2_lsr_r_r_or>; -def Hexagon_S2_asl_r_p_and: +def HEXAGON_S2_asl_r_p_and: di_SInst_didisi_and <"asl", int_hexagon_S2_asl_r_p_and>; -def Hexagon_S2_asr_r_p_and: +def HEXAGON_S2_asr_r_p_and: di_SInst_didisi_and <"asr", int_hexagon_S2_asr_r_p_and>; -def Hexagon_S2_lsl_r_p_and: +def HEXAGON_S2_lsl_r_p_and: di_SInst_didisi_and <"lsl", int_hexagon_S2_lsl_r_p_and>; -def Hexagon_S2_lsr_r_p_and: +def HEXAGON_S2_lsr_r_p_and: di_SInst_didisi_and <"lsr", int_hexagon_S2_lsr_r_p_and>; -def Hexagon_S2_asl_r_p_or: +def HEXAGON_S2_asl_r_p_or: di_SInst_didisi_or <"asl", int_hexagon_S2_asl_r_p_or>; -def Hexagon_S2_asr_r_p_or: +def HEXAGON_S2_asr_r_p_or: di_SInst_didisi_or <"asr", int_hexagon_S2_asr_r_p_or>; -def Hexagon_S2_lsl_r_p_or: +def HEXAGON_S2_lsl_r_p_or: di_SInst_didisi_or <"lsl", int_hexagon_S2_lsl_r_p_or>; -def Hexagon_S2_lsr_r_p_or: +def HEXAGON_S2_lsr_r_p_or: di_SInst_didisi_or <"lsr", int_hexagon_S2_lsr_r_p_or>; // STYPE / SHIFT / Shift by register with saturation. 
-def Hexagon_S2_asl_r_r_sat: +def HEXAGON_S2_asl_r_r_sat: si_SInst_sisi_sat <"asl", int_hexagon_S2_asl_r_r_sat>; -def Hexagon_S2_asr_r_r_sat: +def HEXAGON_S2_asr_r_r_sat: si_SInst_sisi_sat <"asr", int_hexagon_S2_asr_r_r_sat>; // STYPE / SHIFT / Table Index. -def HEXAGON_S2_tableidxb_goodsyntax: +def Hexagon_S2_tableidxb_goodsyntax: si_MInst_sisiu4u5 <"tableidxb",int_hexagon_S2_tableidxb_goodsyntax>; -def HEXAGON_S2_tableidxd_goodsyntax: +def Hexagon_S2_tableidxd_goodsyntax: si_MInst_sisiu4u5 <"tableidxd",int_hexagon_S2_tableidxd_goodsyntax>; -def HEXAGON_S2_tableidxh_goodsyntax: +def Hexagon_S2_tableidxh_goodsyntax: si_MInst_sisiu4u5 <"tableidxh",int_hexagon_S2_tableidxh_goodsyntax>; -def HEXAGON_S2_tableidxw_goodsyntax: +def Hexagon_S2_tableidxw_goodsyntax: si_MInst_sisiu4u5 <"tableidxw",int_hexagon_S2_tableidxw_goodsyntax>; @@ -3396,29 +3432,29 @@ def HEXAGON_S2_tableidxw_goodsyntax: // STYPE / VH / Vector absolute value halfwords. // Rdd64=vabsh(Rss64) -def Hexagon_A2_vabsh: +def HEXAGON_A2_vabsh: di_SInst_di <"vabsh", int_hexagon_A2_vabsh>; -def Hexagon_A2_vabshsat: +def HEXAGON_A2_vabshsat: di_SInst_di_sat <"vabsh", int_hexagon_A2_vabshsat>; // STYPE / VH / Vector shift halfwords by immediate. // Rdd64=v[asl/asr/lsr]h(Rss64,Rt32) -def Hexagon_S2_asl_i_vh: +def HEXAGON_S2_asl_i_vh: di_SInst_disi <"vaslh", int_hexagon_S2_asl_i_vh>; -def Hexagon_S2_asr_i_vh: +def HEXAGON_S2_asr_i_vh: di_SInst_disi <"vasrh", int_hexagon_S2_asr_i_vh>; -def Hexagon_S2_lsr_i_vh: +def HEXAGON_S2_lsr_i_vh: di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_i_vh>; // STYPE / VH / Vector shift halfwords by register. 
// Rdd64=v[asl/asr/lsl/lsr]w(Rss64,Rt32) -def Hexagon_S2_asl_r_vh: +def HEXAGON_S2_asl_r_vh: di_SInst_disi <"vaslh", int_hexagon_S2_asl_r_vh>; -def Hexagon_S2_asr_r_vh: +def HEXAGON_S2_asr_r_vh: di_SInst_disi <"vasrh", int_hexagon_S2_asr_r_vh>; -def Hexagon_S2_lsl_r_vh: +def HEXAGON_S2_lsl_r_vh: di_SInst_disi <"vlslh", int_hexagon_S2_lsl_r_vh>; -def Hexagon_S2_lsr_r_vh: +def HEXAGON_S2_lsr_r_vh: di_SInst_disi <"vlsrh", int_hexagon_S2_lsr_r_vh>; @@ -3427,36 +3463,41 @@ def Hexagon_S2_lsr_r_vh: *********************************************************************/ // STYPE / VW / Vector absolute value words. -def Hexagon_A2_vabsw: +def HEXAGON_A2_vabsw: di_SInst_di <"vabsw", int_hexagon_A2_vabsw>; -def Hexagon_A2_vabswsat: +def HEXAGON_A2_vabswsat: di_SInst_di_sat <"vabsw", int_hexagon_A2_vabswsat>; // STYPE / VW / Vector shift words by immediate. // Rdd64=v[asl/vsl]w(Rss64,Rt32) -def Hexagon_S2_asl_i_vw: +def HEXAGON_S2_asl_i_vw: di_SInst_disi <"vaslw", int_hexagon_S2_asl_i_vw>; -def Hexagon_S2_asr_i_vw: +def HEXAGON_S2_asr_i_vw: di_SInst_disi <"vasrw", int_hexagon_S2_asr_i_vw>; -def Hexagon_S2_lsr_i_vw: +def HEXAGON_S2_lsr_i_vw: di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_i_vw>; // STYPE / VW / Vector shift words by register. // Rdd64=v[asl/vsl]w(Rss64,Rt32) -def Hexagon_S2_asl_r_vw: +def HEXAGON_S2_asl_r_vw: di_SInst_disi <"vaslw", int_hexagon_S2_asl_r_vw>; -def Hexagon_S2_asr_r_vw: +def HEXAGON_S2_asr_r_vw: di_SInst_disi <"vasrw", int_hexagon_S2_asr_r_vw>; -def Hexagon_S2_lsl_r_vw: +def HEXAGON_S2_lsl_r_vw: di_SInst_disi <"vlslw", int_hexagon_S2_lsl_r_vw>; -def Hexagon_S2_lsr_r_vw: +def HEXAGON_S2_lsr_r_vw: di_SInst_disi <"vlsrw", int_hexagon_S2_lsr_r_vw>; // STYPE / VW / Vector shift words with truncate and pack. 
-def Hexagon_S2_asr_r_svw_trun: +def HEXAGON_S2_asr_r_svw_trun: si_SInst_disi <"vasrw", int_hexagon_S2_asr_r_svw_trun>; -def Hexagon_S2_asr_i_svw_trun: +def HEXAGON_S2_asr_i_svw_trun: si_SInst_diu5 <"vasrw", int_hexagon_S2_asr_i_svw_trun>; +// LD / Circular loads. +def HEXAGON_circ_ldd: + di_LDInstPI_diu4 <"circ_ldd", int_hexagon_circ_ldd>; + include "HexagonIntrinsicsV3.td" include "HexagonIntrinsicsV4.td" +include "HexagonIntrinsicsV5.td" diff --git a/lib/Target/Hexagon/HexagonIntrinsicsDerived.td b/lib/Target/Hexagon/HexagonIntrinsicsDerived.td index 68eaf68..2788101 100644 --- a/lib/Target/Hexagon/HexagonIntrinsicsDerived.td +++ b/lib/Target/Hexagon/HexagonIntrinsicsDerived.td @@ -12,18 +12,28 @@ // Optimized with intrinisics accumulates // def : Pat <(mul DoubleRegs:$src1, DoubleRegs:$src2), - (COMBINE_rr - (Hexagon_M2_maci - (Hexagon_M2_maci (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)), - subreg_hireg), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_hireg)), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src1, subreg_hireg)), - (EXTRACT_SUBREG (MPYU64 (EXTRACT_SUBREG DoubleRegs:$src1, subreg_loreg), - (EXTRACT_SUBREG DoubleRegs:$src2, subreg_loreg)), - subreg_loreg))>; + (i64 + (COMBINE_rr + (HEXAGON_M2_maci + (HEXAGON_M2_maci + (i32 + (EXTRACT_SUBREG + (i64 + (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), + subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), + subreg_loreg)))), + subreg_hireg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg))), + (i32 + (EXTRACT_SUBREG + (i64 + (MPYU64 (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), + (i32 (EXTRACT_SUBREG (i64 
DoubleRegs:$src2), + subreg_loreg)))), subreg_loreg))))>; diff --git a/lib/Target/Hexagon/HexagonIntrinsicsV5.td b/lib/Target/Hexagon/HexagonIntrinsicsV5.td new file mode 100644 index 0000000..1d44b52 --- /dev/null +++ b/lib/Target/Hexagon/HexagonIntrinsicsV5.td @@ -0,0 +1,395 @@ +class sf_SInst_sf<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1))]>; + +class si_SInst_sf<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1))]>; + +class sf_SInst_si<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1))]>; + +class sf_SInst_di<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class sf_SInst_df<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class si_SInst_df<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class df_SInst_sf<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>; + +class di_SInst_sf<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>; + +class df_SInst_si<string opc, Intrinsic IntID> + : 
SInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID IntRegs:$src1))]>; + +class df_SInst_df<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class di_SInst_df<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>; + + +class df_SInst_di<string opc, Intrinsic IntID> + : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), + !strconcat("$dst = ", !strconcat(opc , "($src1)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1))]>; + +class sf_MInst_sfsf<string opc, Intrinsic IntID> + : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>; + +class df_MInst_dfdf<string opc, Intrinsic IntID> + : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>; + +class qi_ALU64_dfdf<string opc, Intrinsic IntID> + : ALU64_rr<(outs PredRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), + [(set PredRegs:$dst, (IntID DoubleRegs:$src1, DoubleRegs:$src2))]>; + +class qi_ALU64_dfu5<string opc, Intrinsic IntID> + : ALU64_ri<(outs PredRegs:$dst), (ins DoubleRegs:$src1, u5Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")), + [(set PredRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + + +class sf_MInst_sfsfsf_acc<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, + IntRegs:$dst2), + !strconcat("$dst += ", 
!strconcat(opc , + "($src1, $src2)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, + IntRegs:$src2, IntRegs:$dst2))], + "$dst2 = $dst">; + +class sf_MInst_sfsfsf_nac<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, + IntRegs:$dst2), + !strconcat("$dst -= ", !strconcat(opc , + "($src1, $src2)")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, + IntRegs:$src2, IntRegs:$dst2))], + "$dst2 = $dst">; + + +class sf_MInst_sfsfsfsi_sc<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$dst2, IntRegs:$src1, + IntRegs:$src2, IntRegs:$src3), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2, $src3):scale")), + [(set IntRegs:$dst, (IntID IntRegs:$dst2, IntRegs:$src1, + IntRegs:$src2, IntRegs:$src3))], + "$dst2 = $dst">; + +class sf_MInst_sfsfsf_acc_lib<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, + IntRegs:$dst2), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2):lib")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, + IntRegs:$src2, IntRegs:$dst2))], + "$dst2 = $dst">; + +class sf_MInst_sfsfsf_nac_lib<string opc, Intrinsic IntID> + : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, + IntRegs:$dst2), + !strconcat("$dst -= ", !strconcat(opc , + "($src1, $src2):lib")), + [(set IntRegs:$dst, (IntID IntRegs:$src1, + IntRegs:$src2, IntRegs:$dst2))], + "$dst2 = $dst">; + +class df_MInst_dfdfdf_acc<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, + DoubleRegs:$dst2), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, + DoubleRegs:$src2, DoubleRegs:$dst2))], + "$dst2 = $dst">; + +class df_MInst_dfdfdf_nac<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, + DoubleRegs:$dst2), + !strconcat("$dst -= ", !strconcat(opc , + "($src1, $src2)")), + [(set 
DoubleRegs:$dst, (IntID DoubleRegs:$src1, + DoubleRegs:$src2, DoubleRegs:$dst2))], + "$dst2 = $dst">; + + +class df_MInst_dfdfdfsi_sc<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$dst2, DoubleRegs:$src1, + DoubleRegs:$src2, IntRegs:$src3), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2, $src3):scale")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$dst2, DoubleRegs:$src1, + DoubleRegs:$src2, IntRegs:$src3))], + "$dst2 = $dst">; + +class df_MInst_dfdfdf_acc_lib<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, + DoubleRegs:$dst2), + !strconcat("$dst += ", !strconcat(opc , + "($src1, $src2):lib")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, + DoubleRegs:$src2, DoubleRegs:$dst2))], + "$dst2 = $dst">; + +class df_MInst_dfdfdf_nac_lib<string opc, Intrinsic IntID> + : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, + DoubleRegs:$dst2), + !strconcat("$dst -= ", !strconcat(opc , + "($src1, $src2):lib")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, + DoubleRegs:$src2, DoubleRegs:$dst2))], + "$dst2 = $dst">; + +class qi_SInst_sfsf<string opc, Intrinsic IntID> + : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, $src2)")), + [(set PredRegs:$dst, (IntID IntRegs:$src1, IntRegs:$src2))]>; + +class qi_SInst_sfu5<string opc, Intrinsic IntID> + : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")), + [(set PredRegs:$dst, (IntID IntRegs:$src1, imm:$src2))]>; + +class sf_ALU64_u10_pos<string opc, Intrinsic IntID> + : ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1), + !strconcat("$dst = ", !strconcat(opc , "#$src1):pos")), + [(set IntRegs:$dst, (IntID imm:$src1))]>; + +class sf_ALU64_u10_neg<string opc, Intrinsic IntID> + : ALU64_ri<(outs IntRegs:$dst), (ins u10Imm:$src1), + !strconcat("$dst = ", 
!strconcat(opc , "#$src1):neg")), + [(set IntRegs:$dst, (IntID imm:$src1))]>; + +class df_ALU64_u10_pos<string opc, Intrinsic IntID> + : ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1), + !strconcat("$dst = ", !strconcat(opc , "#$src1):pos")), + [(set DoubleRegs:$dst, (IntID imm:$src1))]>; + +class df_ALU64_u10_neg<string opc, Intrinsic IntID> + : ALU64_ri<(outs DoubleRegs:$dst), (ins u10Imm:$src1), + !strconcat("$dst = ", !strconcat(opc , "#$src1):neg")), + [(set DoubleRegs:$dst, (IntID imm:$src1))]>; + +class di_MInst_diu6<string opc, Intrinsic IntID> + : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u6Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2)")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + +class di_MInst_diu4_rnd<string opc, Intrinsic IntID> + : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd")), + [(set DoubleRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + +class si_MInst_diu4_rnd_sat<string opc, Intrinsic IntID> + : MInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):rnd:sat")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + +class si_SInst_diu4_sat<string opc, Intrinsic IntID> + : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1, u4Imm:$src2), + !strconcat("$dst = ", !strconcat(opc , "($src1, #$src2):sat")), + [(set IntRegs:$dst, (IntID DoubleRegs:$src1, imm:$src2))]>; + + +def HEXAGON_C4_fastcorner9: + qi_SInst_qiqi <"fastcorner9", int_hexagon_C4_fastcorner9>; +def HEXAGON_C4_fastcorner9_not: + qi_SInst_qiqi <"!fastcorner9", int_hexagon_C4_fastcorner9_not>; +def HEXAGON_M5_vrmpybuu: + di_MInst_didi <"vrmpybu", int_hexagon_M5_vrmpybuu>; +def HEXAGON_M5_vrmacbuu: + di_MInst_dididi_acc <"vrmpybu", int_hexagon_M5_vrmacbuu>; +def HEXAGON_M5_vrmpybsu: + di_MInst_didi <"vrmpybsu", int_hexagon_M5_vrmpybsu>; +def HEXAGON_M5_vrmacbsu: + 
di_MInst_dididi_acc <"vrmpybsu", int_hexagon_M5_vrmacbsu>; +def HEXAGON_M5_vmpybuu: + di_MInst_sisi <"vmpybu", int_hexagon_M5_vmpybuu>; +def HEXAGON_M5_vmpybsu: + di_MInst_sisi <"vmpybsu", int_hexagon_M5_vmpybsu>; +def HEXAGON_M5_vmacbuu: + di_MInst_disisi_acc <"vmpybu", int_hexagon_M5_vmacbuu>; +def HEXAGON_M5_vmacbsu: + di_MInst_disisi_acc <"vmpybsu", int_hexagon_M5_vmacbsu>; +def HEXAGON_M5_vdmpybsu: + di_MInst_didi_sat <"vdmpybsu", int_hexagon_M5_vdmpybsu>; +def HEXAGON_M5_vdmacbsu: + di_MInst_dididi_acc_sat <"vdmpybsu", int_hexagon_M5_vdmacbsu>; +def HEXAGON_A5_vaddhubs: + si_SInst_didi_sat <"vaddhub", int_hexagon_A5_vaddhubs>; +def HEXAGON_S5_popcountp: + si_SInst_di <"popcount", int_hexagon_S5_popcountp>; +def HEXAGON_S5_asrhub_rnd_sat_goodsyntax: + si_MInst_diu4_rnd_sat <"vasrhub", int_hexagon_S5_asrhub_rnd_sat_goodsyntax>; +def HEXAGON_S5_asrhub_sat: + si_SInst_diu4_sat <"vasrhub", int_hexagon_S5_asrhub_sat>; +def HEXAGON_S5_vasrhrnd_goodsyntax: + di_MInst_diu4_rnd <"vasrh", int_hexagon_S5_vasrhrnd_goodsyntax>; +def HEXAGON_S2_asr_i_p_rnd: + di_SInst_diu6 <"asr", int_hexagon_S2_asr_i_p_rnd>; +def HEXAGON_S2_asr_i_p_rnd_goodsyntax: + di_MInst_diu6 <"asrrnd", int_hexagon_S2_asr_i_p_rnd_goodsyntax>; +def HEXAGON_F2_sfadd: + sf_MInst_sfsf <"sfadd", int_hexagon_F2_sfadd>; +def HEXAGON_F2_sfsub: + sf_MInst_sfsf <"sfsub", int_hexagon_F2_sfsub>; +def HEXAGON_F2_sfmpy: + sf_MInst_sfsf <"sfmpy", int_hexagon_F2_sfmpy>; +def HEXAGON_F2_sffma: + sf_MInst_sfsfsf_acc <"sfmpy", int_hexagon_F2_sffma>; +def HEXAGON_F2_sffma_sc: + sf_MInst_sfsfsfsi_sc <"sfmpy", int_hexagon_F2_sffma_sc>; +def HEXAGON_F2_sffms: + sf_MInst_sfsfsf_nac <"sfmpy", int_hexagon_F2_sffms>; +def HEXAGON_F2_sffma_lib: + sf_MInst_sfsfsf_acc_lib <"sfmpy", int_hexagon_F2_sffma_lib>; +def HEXAGON_F2_sffms_lib: + sf_MInst_sfsfsf_nac_lib <"sfmpy", int_hexagon_F2_sffms_lib>; +def HEXAGON_F2_sfcmpeq: + qi_SInst_sfsf <"sfcmp.eq", int_hexagon_F2_sfcmpeq>; +def HEXAGON_F2_sfcmpgt: + qi_SInst_sfsf <"sfcmp.gt", 
int_hexagon_F2_sfcmpgt>; +def HEXAGON_F2_sfcmpge: + qi_SInst_sfsf <"sfcmp.ge", int_hexagon_F2_sfcmpge>; +def HEXAGON_F2_sfcmpuo: + qi_SInst_sfsf <"sfcmp.uo", int_hexagon_F2_sfcmpuo>; +def HEXAGON_F2_sfmax: + sf_MInst_sfsf <"sfmax", int_hexagon_F2_sfmax>; +def HEXAGON_F2_sfmin: + sf_MInst_sfsf <"sfmin", int_hexagon_F2_sfmin>; +def HEXAGON_F2_sfclass: + qi_SInst_sfu5 <"sfclass", int_hexagon_F2_sfclass>; +def HEXAGON_F2_sfimm_p: + sf_ALU64_u10_pos <"sfmake", int_hexagon_F2_sfimm_p>; +def HEXAGON_F2_sfimm_n: + sf_ALU64_u10_neg <"sfmake", int_hexagon_F2_sfimm_n>; +def HEXAGON_F2_sffixupn: + sf_MInst_sfsf <"sffixupn", int_hexagon_F2_sffixupn>; +def HEXAGON_F2_sffixupd: + sf_MInst_sfsf <"sffixupd", int_hexagon_F2_sffixupd>; +def HEXAGON_F2_sffixupr: + sf_SInst_sf <"sffixupr", int_hexagon_F2_sffixupr>; +def HEXAGON_F2_dfadd: + df_MInst_dfdf <"dfadd", int_hexagon_F2_dfadd>; +def HEXAGON_F2_dfsub: + df_MInst_dfdf <"dfsub", int_hexagon_F2_dfsub>; +def HEXAGON_F2_dfmpy: + df_MInst_dfdf <"dfmpy", int_hexagon_F2_dfmpy>; +def HEXAGON_F2_dffma: + df_MInst_dfdfdf_acc <"dfmpy", int_hexagon_F2_dffma>; +def HEXAGON_F2_dffms: + df_MInst_dfdfdf_nac <"dfmpy", int_hexagon_F2_dffms>; +def HEXAGON_F2_dffma_lib: + df_MInst_dfdfdf_acc_lib <"dfmpy", int_hexagon_F2_dffma_lib>; +def HEXAGON_F2_dffms_lib: + df_MInst_dfdfdf_nac_lib <"dfmpy", int_hexagon_F2_dffms_lib>; +def HEXAGON_F2_dffma_sc: + df_MInst_dfdfdfsi_sc <"dfmpy", int_hexagon_F2_dffma_sc>; +def HEXAGON_F2_dfmax: + df_MInst_dfdf <"dfmax", int_hexagon_F2_dfmax>; +def HEXAGON_F2_dfmin: + df_MInst_dfdf <"dfmin", int_hexagon_F2_dfmin>; +def HEXAGON_F2_dfcmpeq: + qi_ALU64_dfdf <"dfcmp.eq", int_hexagon_F2_dfcmpeq>; +def HEXAGON_F2_dfcmpgt: + qi_ALU64_dfdf <"dfcmp.gt", int_hexagon_F2_dfcmpgt>; +def HEXAGON_F2_dfcmpge: + qi_ALU64_dfdf <"dfcmp.ge", int_hexagon_F2_dfcmpge>; +def HEXAGON_F2_dfcmpuo: + qi_ALU64_dfdf <"dfcmp.uo", int_hexagon_F2_dfcmpuo>; +def HEXAGON_F2_dfclass: + qi_ALU64_dfu5 <"dfclass", int_hexagon_F2_dfclass>; +def 
HEXAGON_F2_dfimm_p: + df_ALU64_u10_pos <"dfmake", int_hexagon_F2_dfimm_p>; +def HEXAGON_F2_dfimm_n: + df_ALU64_u10_neg <"dfmake", int_hexagon_F2_dfimm_n>; +def HEXAGON_F2_dffixupn: + df_MInst_dfdf <"dffixupn", int_hexagon_F2_dffixupn>; +def HEXAGON_F2_dffixupd: + df_MInst_dfdf <"dffixupd", int_hexagon_F2_dffixupd>; +def HEXAGON_F2_dffixupr: + df_SInst_df <"dffixupr", int_hexagon_F2_dffixupr>; +def HEXAGON_F2_conv_sf2df: + df_SInst_sf <"convert_sf2df", int_hexagon_F2_conv_sf2df>; +def HEXAGON_F2_conv_df2sf: + sf_SInst_df <"convert_df2sf", int_hexagon_F2_conv_df2sf>; +def HEXAGON_F2_conv_uw2sf: + sf_SInst_si <"convert_uw2sf", int_hexagon_F2_conv_uw2sf>; +def HEXAGON_F2_conv_uw2df: + df_SInst_si <"convert_uw2df", int_hexagon_F2_conv_uw2df>; +def HEXAGON_F2_conv_w2sf: + sf_SInst_si <"convert_w2sf", int_hexagon_F2_conv_w2sf>; +def HEXAGON_F2_conv_w2df: + df_SInst_si <"convert_w2df", int_hexagon_F2_conv_w2df>; +def HEXAGON_F2_conv_ud2sf: + sf_SInst_di <"convert_ud2sf", int_hexagon_F2_conv_ud2sf>; +def HEXAGON_F2_conv_ud2df: + df_SInst_di <"convert_ud2df", int_hexagon_F2_conv_ud2df>; +def HEXAGON_F2_conv_d2sf: + sf_SInst_di <"convert_d2sf", int_hexagon_F2_conv_d2sf>; +def HEXAGON_F2_conv_d2df: + df_SInst_di <"convert_d2df", int_hexagon_F2_conv_d2df>; +def HEXAGON_F2_conv_sf2uw: + si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw>; +def HEXAGON_F2_conv_sf2w: + si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w>; +def HEXAGON_F2_conv_sf2ud: + di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud>; +def HEXAGON_F2_conv_sf2d: + di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d>; +def HEXAGON_F2_conv_df2uw: + si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw>; +def HEXAGON_F2_conv_df2w: + si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w>; +def HEXAGON_F2_conv_df2ud: + di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud>; +def HEXAGON_F2_conv_df2d: + di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d>; +def HEXAGON_F2_conv_sf2uw_chop: + 
si_SInst_sf <"convert_sf2uw", int_hexagon_F2_conv_sf2uw_chop>; +def HEXAGON_F2_conv_sf2w_chop: + si_SInst_sf <"convert_sf2w", int_hexagon_F2_conv_sf2w_chop>; +def HEXAGON_F2_conv_sf2ud_chop: + di_SInst_sf <"convert_sf2ud", int_hexagon_F2_conv_sf2ud_chop>; +def HEXAGON_F2_conv_sf2d_chop: + di_SInst_sf <"convert_sf2d", int_hexagon_F2_conv_sf2d_chop>; +def HEXAGON_F2_conv_df2uw_chop: + si_SInst_df <"convert_df2uw", int_hexagon_F2_conv_df2uw_chop>; +def HEXAGON_F2_conv_df2w_chop: + si_SInst_df <"convert_df2w", int_hexagon_F2_conv_df2w_chop>; +def HEXAGON_F2_conv_df2ud_chop: + di_SInst_df <"convert_df2ud", int_hexagon_F2_conv_df2ud_chop>; +def HEXAGON_F2_conv_df2d_chop: + di_SInst_df <"convert_df2d", int_hexagon_F2_conv_df2d_chop>; diff --git a/lib/Target/Hexagon/HexagonMCInst.h b/lib/Target/Hexagon/HexagonMCInst.h new file mode 100644 index 0000000..7a16c24 --- /dev/null +++ b/lib/Target/Hexagon/HexagonMCInst.h @@ -0,0 +1,41 @@ +//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class extends MCInst to allow some VLIW annotation. 
+// +//===----------------------------------------------------------------------===// + +#ifndef HEXAGONMCINST_H +#define HEXAGONMCINST_H + +#include "llvm/MC/MCInst.h" +#include "llvm/CodeGen/MachineInstr.h" + +namespace llvm { + class HexagonMCInst: public MCInst { + // Packet start and end markers + unsigned startPacket: 1, endPacket: 1; + const MachineInstr *MachineI; + public: + explicit HexagonMCInst(): MCInst(), + startPacket(0), endPacket(0) {} + + const MachineInstr* getMI() const { return MachineI; } + + void setMI(const MachineInstr *MI) { MachineI = MI; } + + bool isStartPacket() const { return (startPacket); } + bool isEndPacket() const { return (endPacket); } + + void setStartPacket(bool yes) { startPacket = yes; } + void setEndPacket(bool yes) { endPacket = yes; } + }; +} + +#endif diff --git a/lib/Target/Hexagon/HexagonMCInstLower.cpp b/lib/Target/Hexagon/HexagonMCInstLower.cpp index fbb331b..70bddcc 100644 --- a/lib/Target/Hexagon/HexagonMCInstLower.cpp +++ b/lib/Target/Hexagon/HexagonMCInstLower.cpp @@ -49,7 +49,7 @@ void llvm::HexagonLowerToMC(const MachineInstr* MI, MCInst& MCI, switch (MO.getType()) { default: MI->dump(); - assert(0 && "unknown operand type"); + llvm_unreachable("unknown operand type"); case MachineOperand::MO_Register: // Ignore all implicit register operands. if (MO.isImplicit()) continue; diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp new file mode 100644 index 0000000..7ece408 --- /dev/null +++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -0,0 +1,647 @@ +//===----- HexagonNewValueJump.cpp - Hexagon Backend New Value Jump -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This implements NewValueJump pass in Hexagon. 
+// Ideally, we should merge this as a Peephole pass prior to register +// allocation, but because we have a spill in between the feeder and new value +// jump instructions, we are forced to write after register allocation. +// Having said that, we should re-attempt to pull this earlier at some point +// in future. + +// The basic approach looks for sequence of predicated jump, compare instruciton +// that genereates the predicate and, the feeder to the predicate. Once it finds +// all, it collapses compare and jump instruction into a new valu jump +// intstructions. +// +// +//===----------------------------------------------------------------------===// +#define DEBUG_TYPE "hexagon-nvj" +#include "llvm/PassSupport.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/ScheduleDAGInstrs.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/LiveVariables.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/MachineFunctionAnalysis.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetRegisterInfo.h" +#include "Hexagon.h" +#include "HexagonTargetMachine.h" +#include "HexagonRegisterInfo.h" +#include "HexagonSubtarget.h" +#include "HexagonInstrInfo.h" +#include "HexagonMachineFunctionInfo.h" + +#include <map> + +#include "llvm/Support/CommandLine.h" +using namespace llvm; + +STATISTIC(NumNVJGenerated, "Number of New Value Jump Instructions created"); + +static cl::opt<int> +DbgNVJCount("nvj-count", cl::init(-1), cl::Hidden, cl::desc( + "Maximum number of predicated jumps to be converted to New Value Jump")); + +static cl::opt<bool> DisableNewValueJumps("disable-nvjump", cl::Hidden, + cl::ZeroOrMore, cl::init(false), + cl::desc("Disable New Value Jumps")); + +namespace { + struct 
HexagonNewValueJump : public MachineFunctionPass { + const HexagonInstrInfo *QII; + const HexagonRegisterInfo *QRI; + + public: + static char ID; + + HexagonNewValueJump() : MachineFunctionPass(ID) { } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + MachineFunctionPass::getAnalysisUsage(AU); + } + + const char *getPassName() const { + return "Hexagon NewValueJump"; + } + + virtual bool runOnMachineFunction(MachineFunction &Fn); + + private: + + }; + +} // end of anonymous namespace + +char HexagonNewValueJump::ID = 0; + +// We have identified this II could be feeder to NVJ, +// verify that it can be. +static bool canBeFeederToNewValueJump(const HexagonInstrInfo *QII, + const TargetRegisterInfo *TRI, + MachineBasicBlock::iterator II, + MachineBasicBlock::iterator end, + MachineBasicBlock::iterator skip, + MachineFunction &MF) { + + // Predicated instruction can not be feeder to NVJ. + if (QII->isPredicated(II)) + return false; + + // Bail out if feederReg is a paired register (double regs in + // our case). One would think that we can check to see if a given + // register cmpReg1 or cmpReg2 is a sub register of feederReg + // using -- if (QRI->isSubRegister(feederReg, cmpReg1) logic + // before the callsite of this function + // But we can not as it comes in the following fashion. + // %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill> + // %R0<def> = KILL %R0, %D0<imp-use,kill> + // %P0<def> = CMPEQri %R0<kill>, 0 + // Hence, we need to check if it's a KILL instruction. + if (II->getOpcode() == TargetOpcode::KILL) + return false; + + + // Make sure there there is no 'def' or 'use' of any of the uses of + // feeder insn between it's definition, this MI and jump, jmpInst + // skipping compare, cmpInst. + // Here's the example. 
+ // r21=memub(r22+r24<<#0) + // p0 = cmp.eq(r21, #0) + // r4=memub(r3+r21<<#0) + // if (p0.new) jump:t .LBB29_45 + // Without this check, it will be converted into + // r4=memub(r3+r21<<#0) + // r21=memub(r22+r24<<#0) + // p0 = cmp.eq(r21, #0) + // if (p0.new) jump:t .LBB29_45 + // and result WAR hazards if converted to New Value Jump. + + for (unsigned i = 0; i < II->getNumOperands(); ++i) { + if (II->getOperand(i).isReg() && + (II->getOperand(i).isUse() || II->getOperand(i).isDef())) { + MachineBasicBlock::iterator localII = II; + ++localII; + unsigned Reg = II->getOperand(i).getReg(); + for (MachineBasicBlock::iterator localBegin = localII; + localBegin != end; ++localBegin) { + if (localBegin == skip ) continue; + // Check for Subregisters too. + if (localBegin->modifiesRegister(Reg, TRI) || + localBegin->readsRegister(Reg, TRI)) + return false; + } + } + } + return true; +} + +// These are the common checks that need to performed +// to determine if +// 1. compare instruction can be moved before jump. +// 2. feeder to the compare instruction can be moved before jump. +static bool commonChecksToProhibitNewValueJump(bool afterRA, + MachineBasicBlock::iterator MII) { + + // If store in path, bail out. + if (MII->getDesc().mayStore()) + return false; + + // if call in path, bail out. + if (MII->getOpcode() == Hexagon::CALLv3) + return false; + + // if NVJ is running prior to RA, do the following checks. + if (!afterRA) { + // The following Target Opcode instructions are spurious + // to new value jump. If they are in the path, bail out. + // KILL sets kill flag on the opcode. It also sets up a + // single register, out of pair. + // %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill> + // %R0<def> = KILL %R0, %D0<imp-use,kill> + // %P0<def> = CMPEQri %R0<kill>, 0 + // PHI can be anything after RA. + // COPY can remateriaze things in between feeder, compare and nvj. 
+ if (MII->getOpcode() == TargetOpcode::KILL || + MII->getOpcode() == TargetOpcode::PHI || + MII->getOpcode() == TargetOpcode::COPY) + return false; + + // The following pseudo Hexagon instructions sets "use" and "def" + // of registers by individual passes in the backend. At this time, + // we don't know the scope of usage and definitions of these + // instructions. + if (MII->getOpcode() == Hexagon::TFR_condset_rr || + MII->getOpcode() == Hexagon::TFR_condset_ii || + MII->getOpcode() == Hexagon::TFR_condset_ri || + MII->getOpcode() == Hexagon::TFR_condset_ir || + MII->getOpcode() == Hexagon::LDriw_pred || + MII->getOpcode() == Hexagon::STriw_pred) + return false; + } + + return true; +} + +static bool canCompareBeNewValueJump(const HexagonInstrInfo *QII, + const TargetRegisterInfo *TRI, + MachineBasicBlock::iterator II, + unsigned pReg, + bool secondReg, + bool optLocation, + MachineBasicBlock::iterator end, + MachineFunction &MF) { + + MachineInstr *MI = II; + + // If the second operand of the compare is an imm, make sure it's in the + // range specified by the arch. + if (!secondReg) { + int64_t v = MI->getOperand(2).getImm(); + if (MI->getOpcode() == Hexagon::CMPGEri || + (MI->getOpcode() == Hexagon::CMPGEUri && v > 0)) + --v; + + if (!(isUInt<5>(v) || + ((MI->getOpcode() == Hexagon::CMPEQri || + MI->getOpcode() == Hexagon::CMPGTri || + MI->getOpcode() == Hexagon::CMPGEri) && + (v == -1)))) + return false; + } + + unsigned cmpReg1, cmpOp2 = 0; // cmpOp2 assignment silences compiler warning. + cmpReg1 = MI->getOperand(1).getReg(); + + if (secondReg) { + cmpOp2 = MI->getOperand(2).getReg(); + + // Make sure that that second register is not from COPY + // At machine code level, we don't need this, but if we decide + // to move new value jump prior to RA, we would be needing this. 
+ MachineRegisterInfo &MRI = MF.getRegInfo(); + if (secondReg && !TargetRegisterInfo::isPhysicalRegister(cmpOp2)) { + MachineInstr *def = MRI.getVRegDef(cmpOp2); + if (def->getOpcode() == TargetOpcode::COPY) + return false; + } + } + + // Walk the instructions after the compare (predicate def) to the jump, + // and satisfy the following conditions. + ++II ; + for (MachineBasicBlock::iterator localII = II; localII != end; + ++localII) { + + // Check 1. + // If "common" checks fail, bail out. + if (!commonChecksToProhibitNewValueJump(optLocation, localII)) + return false; + + // Check 2. + // If there is a def or use of predicate (result of compare), bail out. + if (localII->modifiesRegister(pReg, TRI) || + localII->readsRegister(pReg, TRI)) + return false; + + // Check 3. + // If there is a def of any of the use of the compare (operands of compare), + // bail out. + // Eg. + // p0 = cmp.eq(r2, r0) + // r2 = r4 + // if (p0.new) jump:t .LBB28_3 + if (localII->modifiesRegister(cmpReg1, TRI) || + (secondReg && localII->modifiesRegister(cmpOp2, TRI))) + return false; + } + return true; +} + +// Given a compare operator, return a matching New Value Jump +// compare operator. 
Make sure that MI here is included in +// HexagonInstrInfo.cpp::isNewValueJumpCandidate +static unsigned getNewValueJumpOpcode(const MachineInstr *MI, int reg, + bool secondRegNewified) { + switch (MI->getOpcode()) { + case Hexagon::CMPEQrr: + return Hexagon::JMP_EQrrPt_nv_V4; + + case Hexagon::CMPEQri: { + if (reg >= 0) + return Hexagon::JMP_EQriPt_nv_V4; + else + return Hexagon::JMP_EQriPtneg_nv_V4; + } + + case Hexagon::CMPLTrr: + case Hexagon::CMPGTrr: { + if (secondRegNewified) + return Hexagon::JMP_GTrrdnPt_nv_V4; + else + return Hexagon::JMP_GTrrPt_nv_V4; + } + + case Hexagon::CMPGEri: { + if (reg >= 1) + return Hexagon::JMP_GTriPt_nv_V4; + else + return Hexagon::JMP_GTriPtneg_nv_V4; + } + + case Hexagon::CMPGTri: { + if (reg >= 0) + return Hexagon::JMP_GTriPt_nv_V4; + else + return Hexagon::JMP_GTriPtneg_nv_V4; + } + + case Hexagon::CMPLTUrr: + case Hexagon::CMPGTUrr: { + if (secondRegNewified) + return Hexagon::JMP_GTUrrdnPt_nv_V4; + else + return Hexagon::JMP_GTUrrPt_nv_V4; + } + + case Hexagon::CMPGTUri: + return Hexagon::JMP_GTUriPt_nv_V4; + + case Hexagon::CMPGEUri: { + if (reg == 0) + return Hexagon::JMP_EQrrPt_nv_V4; + else + return Hexagon::JMP_GTUriPt_nv_V4; + } + + default: + llvm_unreachable("Could not find matching New Value Jump instruction."); + } + // return *some value* to avoid compiler warning + return 0; +} + +bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { + + DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n" + << "********** Function: " + << MF.getFunction()->getName() << "\n"); + +#if 0 + // for now disable this, if we move NewValueJump before register + // allocation we need this information. 
+ LiveVariables &LVs = getAnalysis<LiveVariables>(); +#endif + + QII = static_cast<const HexagonInstrInfo *>(MF.getTarget().getInstrInfo()); + QRI = + static_cast<const HexagonRegisterInfo *>(MF.getTarget().getRegisterInfo()); + + if (!QRI->Subtarget.hasV4TOps() || + DisableNewValueJumps) { + return false; + } + + int nvjCount = DbgNVJCount; + int nvjGenerated = 0; + + // Loop through all the bb's of the function + for (MachineFunction::iterator MBBb = MF.begin(), MBBe = MF.end(); + MBBb != MBBe; ++MBBb) { + MachineBasicBlock* MBB = MBBb; + + DEBUG(dbgs() << "** dumping bb ** " + << MBB->getNumber() << "\n"); + DEBUG(MBB->dump()); + DEBUG(dbgs() << "\n" << "********** dumping instr bottom up **********\n"); + bool foundJump = false; + bool foundCompare = false; + bool invertPredicate = false; + unsigned predReg = 0; // predicate reg of the jump. + unsigned cmpReg1 = 0; + int cmpOp2 = 0; + bool MO1IsKill = false; + bool MO2IsKill = false; + MachineBasicBlock::iterator jmpPos; + MachineBasicBlock::iterator cmpPos; + MachineInstr *cmpInstr = NULL, *jmpInstr = NULL; + MachineBasicBlock *jmpTarget = NULL; + bool afterRA = false; + bool isSecondOpReg = false; + bool isSecondOpNewified = false; + // Traverse the basic block - bottom up + for (MachineBasicBlock::iterator MII = MBB->end(), E = MBB->begin(); + MII != E;) { + MachineInstr *MI = --MII; + if (MI->isDebugValue()) { + continue; + } + + if ((nvjCount == 0) || (nvjCount > -1 && nvjCount <= nvjGenerated)) + break; + + DEBUG(dbgs() << "Instr: "; MI->dump(); dbgs() << "\n"); + + if (!foundJump && + (MI->getOpcode() == Hexagon::JMP_c || + MI->getOpcode() == Hexagon::JMP_cNot || + MI->getOpcode() == Hexagon::JMP_cdnPt || + MI->getOpcode() == Hexagon::JMP_cdnPnt || + MI->getOpcode() == Hexagon::JMP_cdnNotPt || + MI->getOpcode() == Hexagon::JMP_cdnNotPnt)) { + // This is where you would insert your compare and + // instr that feeds compare + jmpPos = MII; + jmpInstr = MI; + predReg = MI->getOperand(0).getReg(); + afterRA 
= TargetRegisterInfo::isPhysicalRegister(predReg); + + // If ifconverter had not messed up with the kill flags of the + // operands, the following check on the kill flag would suffice. + // if(!jmpInstr->getOperand(0).isKill()) break; + + // This predicate register is live out out of BB + // this would only work if we can actually use Live + // variable analysis on phy regs - but LLVM does not + // provide LV analysis on phys regs. + //if(LVs.isLiveOut(predReg, *MBB)) break; + + // Get all the successors of this block - which will always + // be 2. Check if the predicate register is live in in those + // successor. If yes, we can not delete the predicate - + // I am doing this only because LLVM does not provide LiveOut + // at the BB level. + bool predLive = false; + for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(), + SIE = MBB->succ_end(); SI != SIE; ++SI) { + MachineBasicBlock* succMBB = *SI; + if (succMBB->isLiveIn(predReg)) { + predLive = true; + } + } + if (predLive) + break; + + jmpTarget = MI->getOperand(1).getMBB(); + foundJump = true; + if (MI->getOpcode() == Hexagon::JMP_cNot || + MI->getOpcode() == Hexagon::JMP_cdnNotPt || + MI->getOpcode() == Hexagon::JMP_cdnNotPnt) { + invertPredicate = true; + } + continue; + } + + // No new value jump if there is a barrier. A barrier has to be in its + // own packet. A barrier has zero operands. We conservatively bail out + // here if we see any instruction with zero operands. + if (foundJump && MI->getNumOperands() == 0) + break; + + if (foundJump && + !foundCompare && + MI->getOperand(0).isReg() && + MI->getOperand(0).getReg() == predReg) { + + // Not all compares can be new value compare. 
Arch Spec: 7.6.1.1 + if (QII->isNewValueJumpCandidate(MI)) { + + assert((MI->getDesc().isCompare()) && + "Only compare instruction can be collapsed into New Value Jump"); + isSecondOpReg = MI->getOperand(2).isReg(); + + if (!canCompareBeNewValueJump(QII, QRI, MII, predReg, isSecondOpReg, + afterRA, jmpPos, MF)) + break; + + cmpInstr = MI; + cmpPos = MII; + foundCompare = true; + + // We need cmpReg1 and cmpOp2(imm or reg) while building + // new value jump instruction. + cmpReg1 = MI->getOperand(1).getReg(); + if (MI->getOperand(1).isKill()) + MO1IsKill = true; + + if (isSecondOpReg) { + cmpOp2 = MI->getOperand(2).getReg(); + if (MI->getOperand(2).isKill()) + MO2IsKill = true; + } else + cmpOp2 = MI->getOperand(2).getImm(); + continue; + } + } + + if (foundCompare && foundJump) { + + // If "common" checks fail, bail out on this BB. + if (!commonChecksToProhibitNewValueJump(afterRA, MII)) + break; + + bool foundFeeder = false; + MachineBasicBlock::iterator feederPos = MII; + if (MI->getOperand(0).isReg() && + MI->getOperand(0).isDef() && + (MI->getOperand(0).getReg() == cmpReg1 || + (isSecondOpReg && + MI->getOperand(0).getReg() == (unsigned) cmpOp2))) { + + unsigned feederReg = MI->getOperand(0).getReg(); + + // First try to see if we can get the feeder from the first operand + // of the compare. If we can not, and if secondOpReg is true + // (second operand of the compare is also register), try that one. + // TODO: Try to come up with some heuristic to figure out which + // feeder would benefit. + + if (feederReg == cmpReg1) { + if (!canBeFeederToNewValueJump(QII, QRI, MII, jmpPos, cmpPos, MF)) { + if (!isSecondOpReg) + break; + else + continue; + } else + foundFeeder = true; + } + + if (!foundFeeder && + isSecondOpReg && + feederReg == (unsigned) cmpOp2) + if (!canBeFeederToNewValueJump(QII, QRI, MII, jmpPos, cmpPos, MF)) + break; + + if (isSecondOpReg) { + // In case of CMPLT, or CMPLTU, or EQ with the second register + // to newify, swap the operands. 
+ if (cmpInstr->getOpcode() == Hexagon::CMPLTrr || + cmpInstr->getOpcode() == Hexagon::CMPLTUrr || + (cmpInstr->getOpcode() == Hexagon::CMPEQrr && + feederReg == (unsigned) cmpOp2)) { + unsigned tmp = cmpReg1; + bool tmpIsKill = MO1IsKill; + cmpReg1 = cmpOp2; + MO1IsKill = MO2IsKill; + cmpOp2 = tmp; + MO2IsKill = tmpIsKill; + } + + // Now we have swapped the operands, all we need to check is, + // if the second operand (after swap) is the feeder. + // And if it is, make a note. + if (feederReg == (unsigned)cmpOp2) + isSecondOpNewified = true; + } + + // Now that we are moving feeder close the jump, + // make sure we are respecting the kill values of + // the operands of the feeder. + + bool updatedIsKill = false; + for (unsigned i = 0; i < MI->getNumOperands(); i++) { + MachineOperand &MO = MI->getOperand(i); + if (MO.isReg() && MO.isUse()) { + unsigned feederReg = MO.getReg(); + for (MachineBasicBlock::iterator localII = feederPos, + end = jmpPos; localII != end; localII++) { + MachineInstr *localMI = localII; + for (unsigned j = 0; j < localMI->getNumOperands(); j++) { + MachineOperand &localMO = localMI->getOperand(j); + if (localMO.isReg() && localMO.isUse() && + localMO.isKill() && feederReg == localMO.getReg()) { + // We found that there is kill of a use register + // Set up a kill flag on the register + localMO.setIsKill(false); + MO.setIsKill(); + updatedIsKill = true; + break; + } + } + if (updatedIsKill) break; + } + } + if (updatedIsKill) break; + } + + MBB->splice(jmpPos, MI->getParent(), MI); + MBB->splice(jmpPos, MI->getParent(), cmpInstr); + DebugLoc dl = MI->getDebugLoc(); + MachineInstr *NewMI; + + assert((QII->isNewValueJumpCandidate(cmpInstr)) && + "This compare is not a New Value Jump candidate."); + unsigned opc = getNewValueJumpOpcode(cmpInstr, cmpOp2, + isSecondOpNewified); + if (invertPredicate) + opc = QII->getInvertedPredicatedOpcode(opc); + + // Manage the conversions from CMPGEUri to either CMPEQrr + // or CMPGTUri properly. 
See Arch spec for CMPGEUri instructions. + // This has to be after the getNewValueJumpOpcode function call as + // second operand of the compare could be modified in this logic. + if (cmpInstr->getOpcode() == Hexagon::CMPGEUri) { + if (cmpOp2 == 0) { + cmpOp2 = cmpReg1; + MO2IsKill = MO1IsKill; + isSecondOpReg = true; + } else + --cmpOp2; + } + + // Manage the conversions from CMPGEri to CMPGTUri properly. + // See Arch spec for CMPGEri instructions. + if (cmpInstr->getOpcode() == Hexagon::CMPGEri) + --cmpOp2; + + if (isSecondOpReg) { + NewMI = BuildMI(*MBB, jmpPos, dl, + QII->get(opc)) + .addReg(cmpReg1, getKillRegState(MO1IsKill)) + .addReg(cmpOp2, getKillRegState(MO2IsKill)) + .addMBB(jmpTarget); + } + else { + NewMI = BuildMI(*MBB, jmpPos, dl, + QII->get(opc)) + .addReg(cmpReg1, getKillRegState(MO1IsKill)) + .addImm(cmpOp2) + .addMBB(jmpTarget); + } + + assert(NewMI && "New Value Jump Instruction Not created!"); + if (cmpInstr->getOperand(0).isReg() && + cmpInstr->getOperand(0).isKill()) + cmpInstr->getOperand(0).setIsKill(false); + if (cmpInstr->getOperand(1).isReg() && + cmpInstr->getOperand(1).isKill()) + cmpInstr->getOperand(1).setIsKill(false); + cmpInstr->eraseFromParent(); + jmpInstr->eraseFromParent(); + ++nvjGenerated; + ++NumNVJGenerated; + break; + } + } + } + } + + return true; + +} + +FunctionPass *llvm::createHexagonNewValueJump() { + return new HexagonNewValueJump(); +} diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp index 2a9de92..2c23674 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -63,6 +63,7 @@ const uint16_t* HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction return CalleeSavedRegsV2; case HexagonSubtarget::V3: case HexagonSubtarget::V4: + case HexagonSubtarget::V5: return CalleeSavedRegsV3; } llvm_unreachable("Callee saved registers requested for unknown architecture " @@ -109,6 +110,7 @@ 
HexagonRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { return CalleeSavedRegClassesV2; case HexagonSubtarget::V3: case HexagonSubtarget::V4: + case HexagonSubtarget::V5: return CalleeSavedRegClassesV3; } llvm_unreachable("Callee saved register classes requested for unknown " @@ -179,13 +181,15 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // r0 = add(r30, #10000) // r0 = memw(r0) if ( (MI.getOpcode() == Hexagon::LDriw) || - (MI.getOpcode() == Hexagon::LDrid) || - (MI.getOpcode() == Hexagon::LDrih) || - (MI.getOpcode() == Hexagon::LDriuh) || - (MI.getOpcode() == Hexagon::LDrib) || - (MI.getOpcode() == Hexagon::LDriub) ) { + (MI.getOpcode() == Hexagon::LDrid) || + (MI.getOpcode() == Hexagon::LDrih) || + (MI.getOpcode() == Hexagon::LDriuh) || + (MI.getOpcode() == Hexagon::LDrib) || + (MI.getOpcode() == Hexagon::LDriub) || + (MI.getOpcode() == Hexagon::LDriw_f) || + (MI.getOpcode() == Hexagon::LDrid_f)) { unsigned dstReg = (MI.getOpcode() == Hexagon::LDrid) ? - *getSubRegisters(MI.getOperand(0).getReg()) : + getSubReg(MI.getOperand(0).getReg(), Hexagon::subreg_loreg) : MI.getOperand(0).getReg(); // Check if offset can fit in addi. @@ -203,10 +207,13 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, MI.getOperand(i).ChangeToRegister(dstReg, false, false, true); MI.getOperand(i+1).ChangeToImmediate(0); - } else if ((MI.getOpcode() == Hexagon::STriw) || + } else if ((MI.getOpcode() == Hexagon::STriw_indexed) || + (MI.getOpcode() == Hexagon::STriw) || (MI.getOpcode() == Hexagon::STrid) || (MI.getOpcode() == Hexagon::STrih) || - (MI.getOpcode() == Hexagon::STrib)) { + (MI.getOpcode() == Hexagon::STrib) || + (MI.getOpcode() == Hexagon::STrid_f) || + (MI.getOpcode() == Hexagon::STriw_f)) { // For stores, we need a reserved register. 
Change // memw(r30 + #10000) = r0 to: // diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.h b/lib/Target/Hexagon/HexagonRegisterInfo.h index 6cf727b..85355ae 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.h +++ b/lib/Target/Hexagon/HexagonRegisterInfo.h @@ -73,6 +73,10 @@ struct HexagonRegisterInfo : public HexagonGenRegisterInfo { return true; } + bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const { + return true; + } + // Debug information queries. unsigned getRARegister() const; unsigned getFrameRegister(const MachineFunction &MF) const; diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.td b/lib/Target/Hexagon/HexagonRegisterInfo.td index d44eae3..fe41fc3 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.td +++ b/lib/Target/Hexagon/HexagonRegisterInfo.td @@ -131,6 +131,9 @@ let Namespace = "Hexagon" in { def SA1 : Rc<2, "sa1">, DwarfRegNum<[69]>; def LC1 : Rc<3, "lc1">, DwarfRegNum<[70]>; + def M0 : Rc<6, "m0">, DwarfRegNum<[71]>; + def M1 : Rc<7, "m1">, DwarfRegNum<[72]>; + def PC : Rc<9, "pc">, DwarfRegNum<[32]>; // is the Dwarf number correct? def GP : Rc<11, "gp">, DwarfRegNum<[33]>; // is the Dwarf number correct? } @@ -140,19 +143,15 @@ let Namespace = "Hexagon" in { // FIXME: the register order should be defined in terms of the preferred // allocation order... 
// -def IntRegs : RegisterClass<"Hexagon", [i32], 32, +def IntRegs : RegisterClass<"Hexagon", [i32,f32], 32, (add (sequence "R%u", 0, 9), (sequence "R%u", 12, 28), R10, R11, R29, R30, R31)> { } - - -def DoubleRegs : RegisterClass<"Hexagon", [i64], 64, +def DoubleRegs : RegisterClass<"Hexagon", [i64,f64], 64, (add (sequence "D%u", 0, 4), - (sequence "D%u", 6, 13), D5, D14, D15)> { - let SubRegClasses = [(IntRegs subreg_loreg, subreg_hireg)]; -} + (sequence "D%u", 6, 13), D5, D14, D15)>; def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))> @@ -162,6 +161,7 @@ def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))> def CRRegs : RegisterClass<"Hexagon", [i32], 32, (add (sequence "LC%u", 0, 1), - (sequence "SA%u", 0, 1), PC, GP)> { + (sequence "SA%u", 0, 1), + (sequence "M%u", 0, 1), PC, GP)> { let Size = 32; } diff --git a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp index 66a00e1..2468f0b 100644 --- a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp +++ b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp @@ -1,4 +1,4 @@ -//===- HexagonRemoveExtendArgs.cpp - Remove unecessary argument sign extends =// +//===- HexagonRemoveExtendArgs.cpp - Remove unnecessary argument sign extends // // // The LLVM Compiler Infrastructure // diff --git a/lib/Target/Hexagon/HexagonSchedule.td b/lib/Target/Hexagon/HexagonSchedule.td index fbea445..d1076b8 100644 --- a/lib/Target/Hexagon/HexagonSchedule.td +++ b/lib/Target/Hexagon/HexagonSchedule.td @@ -13,7 +13,6 @@ def LSUNIT : FuncUnit; def MUNIT : FuncUnit; def SUNIT : FuncUnit; - // Itinerary classes def ALU32 : InstrItinClass; def ALU64 : InstrItinClass; @@ -24,23 +23,31 @@ def LD : InstrItinClass; def M : InstrItinClass; def ST : InstrItinClass; def S : InstrItinClass; +def SYS : InstrItinClass; +def MARKER : InstrItinClass; def PSEUDO : InstrItinClass; - def HexagonItineraries : - ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [ - 
InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>, - InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>, - InstrItinData<CR , [InstrStage<1, [SUNIT]>]>, - InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>, - InstrItinData<JR , [InstrStage<1, [MUNIT]>]>, - InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>, - InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>, - InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>, - InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>, - InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]> -]>; - + ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [ + InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>, + InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>, + InstrItinData<CR , [InstrStage<1, [SUNIT]>]>, + InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>, + InstrItinData<JR , [InstrStage<1, [MUNIT]>]>, + InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>, + InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>, + InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>, + InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>, + InstrItinData<SYS , [InstrStage<1, [LSUNIT]>]>, + InstrItinData<MARKER , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>, + InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]> + ]>; + +def HexagonModel : SchedMachineModel { + // Max issue per cycle == bundle width. + let IssueWidth = 4; + let Itineraries = HexagonItineraries; +} //===----------------------------------------------------------------------===// // V4 Machine Info + diff --git a/lib/Target/Hexagon/HexagonScheduleV4.td b/lib/Target/Hexagon/HexagonScheduleV4.td index 4cf66fe..9b41126 100644 --- a/lib/Target/Hexagon/HexagonScheduleV4.td +++ b/lib/Target/Hexagon/HexagonScheduleV4.td @@ -23,7 +23,6 @@ // | SLOT3 | XTYPE ALU32 J CR | // |===========|==================================================| - // Functional Units. 
def SLOT0 : FuncUnit; def SLOT1 : FuncUnit; @@ -34,22 +33,32 @@ def SLOT3 : FuncUnit; def NV_V4 : InstrItinClass; def MEM_V4 : InstrItinClass; // ALU64/M/S Instruction classes of V2 are collectively knownn as XTYPE in V4. +def PREFIX : InstrItinClass; + +def HexagonItinerariesV4 : + ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3], [], [ + InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, + InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>, + InstrItinData<CR , [InstrStage<1, [SLOT3]>]>, + InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>, + InstrItinData<JR , [InstrStage<1, [SLOT2]>]>, + InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>, + InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>, + InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>, + InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>, + InstrItinData<SYS , [InstrStage<1, [SLOT0]>]>, + InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>, + InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>, + InstrItinData<MARKER , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, + InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, + InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]> + ]>; -def HexagonItinerariesV4 : ProcessorItineraries< - [SLOT0, SLOT1, SLOT2, SLOT3], [], [ - InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>, - InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>, - InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, - InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>, - InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>, - InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>, - InstrItinData<JR , [InstrStage<1, [SLOT2]>]>, - InstrItinData<CR , [InstrStage<1, [SLOT3]>]>, - InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, - InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>, - InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>, - InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]> -]>; +def 
HexagonModelV4 : SchedMachineModel { + // Max issue per cycle == bundle width. + let IssueWidth = 4; + let Itineraries = HexagonItinerariesV4; +} //===----------------------------------------------------------------------===// // Hexagon V4 Resource Definitions - diff --git a/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp b/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp index d10c9f2..a81cd91 100644 --- a/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp +++ b/lib/Target/Hexagon/HexagonSplitTFRCondSets.cpp @@ -14,7 +14,7 @@ // {p0 = cmp.eq(r0,r1)} // {r3 = mux(p0,#1,#3)} // -// This requires two packets. If we use .new predicated immediate transfers, +// This requires two packets. If we use .new predicated immediate transfers, // then we can do this in a single packet, e.g.: // // {p0 = cmp.eq(r0,r1) @@ -81,40 +81,126 @@ bool HexagonSplitTFRCondSets::runOnMachineFunction(MachineFunction &Fn) { for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end(); ++MII) { MachineInstr *MI = MII; - int Opc = MI->getOpcode(); - if (Opc == Hexagon::TFR_condset_rr) { - - int DestReg = MI->getOperand(0).getReg(); - int SrcReg1 = MI->getOperand(2).getReg(); - int SrcReg2 = MI->getOperand(3).getReg(); - - // Minor optimization: do not emit the predicated copy if the source and - // the destination is the same register - if (DestReg != SrcReg1) { - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cPt), - DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1); + int Opc1, Opc2; + switch(MI->getOpcode()) { + case Hexagon::TFR_condset_rr: + case Hexagon::TFR_condset_rr_f: + case Hexagon::TFR_condset_rr64_f: { + int DestReg = MI->getOperand(0).getReg(); + int SrcReg1 = MI->getOperand(2).getReg(); + int SrcReg2 = MI->getOperand(3).getReg(); + + if (MI->getOpcode() == Hexagon::TFR_condset_rr || + MI->getOpcode() == Hexagon::TFR_condset_rr_f) { + Opc1 = Hexagon::TFR_cPt; + Opc2 = Hexagon::TFR_cNotPt; + } + else if (MI->getOpcode() == Hexagon::TFR_condset_rr64_f) { + 
Opc1 = Hexagon::TFR64_cPt; + Opc2 = Hexagon::TFR64_cNotPt; + } + + // Minor optimization: do not emit the predicated copy if the source + // and the destination is the same register. + if (DestReg != SrcReg1) { + BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc1), + DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg1); + } + if (DestReg != SrcReg2) { + BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Opc2), + DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2); + } + MII = MBB->erase(MI); + --MII; + break; } - if (DestReg != SrcReg2) { - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFR_cNotPt), - DestReg).addReg(MI->getOperand(1).getReg()).addReg(SrcReg2); + case Hexagon::TFR_condset_ri: + case Hexagon::TFR_condset_ri_f: { + int DestReg = MI->getOperand(0).getReg(); + int SrcReg1 = MI->getOperand(2).getReg(); + + // Do not emit the predicated copy if the source and the destination + // is the same register. + if (DestReg != SrcReg1) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFR_cPt), DestReg). + addReg(MI->getOperand(1).getReg()).addReg(SrcReg1); + } + if (MI->getOpcode() == Hexagon::TFR_condset_ri ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cNotPt), DestReg). + addReg(MI->getOperand(1).getReg()). + addImm(MI->getOperand(3).getImm()); + } else if (MI->getOpcode() == Hexagon::TFR_condset_ri_f ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cNotPt_f), DestReg). + addReg(MI->getOperand(1).getReg()). + addFPImm(MI->getOperand(3).getFPImm()); + } + + MII = MBB->erase(MI); + --MII; + break; + } + case Hexagon::TFR_condset_ir: + case Hexagon::TFR_condset_ir_f: { + int DestReg = MI->getOperand(0).getReg(); + int SrcReg2 = MI->getOperand(3).getReg(); + + if (MI->getOpcode() == Hexagon::TFR_condset_ir ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cPt), DestReg). + addReg(MI->getOperand(1).getReg()). 
+ addImm(MI->getOperand(2).getImm()); + } else if (MI->getOpcode() == Hexagon::TFR_condset_ir_f ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cPt_f), DestReg). + addReg(MI->getOperand(1).getReg()). + addFPImm(MI->getOperand(2).getFPImm()); + } + + // Do not emit the predicated copy if the source and + // the destination is the same register. + if (DestReg != SrcReg2) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFR_cNotPt), DestReg). + addReg(MI->getOperand(1).getReg()).addReg(SrcReg2); + } + MII = MBB->erase(MI); + --MII; + break; + } + case Hexagon::TFR_condset_ii: + case Hexagon::TFR_condset_ii_f: { + int DestReg = MI->getOperand(0).getReg(); + int SrcReg1 = MI->getOperand(1).getReg(); + + if (MI->getOpcode() == Hexagon::TFR_condset_ii ) { + int Immed1 = MI->getOperand(2).getImm(); + int Immed2 = MI->getOperand(3).getImm(); + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cPt), + DestReg).addReg(SrcReg1).addImm(Immed1); + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cNotPt), + DestReg).addReg(SrcReg1).addImm(Immed2); + } else if (MI->getOpcode() == Hexagon::TFR_condset_ii_f ) { + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cPt_f), DestReg). + addReg(SrcReg1). + addFPImm(MI->getOperand(2).getFPImm()); + BuildMI(*MBB, MII, MI->getDebugLoc(), + TII->get(Hexagon::TFRI_cNotPt_f), DestReg). + addReg(SrcReg1). 
+ addFPImm(MI->getOperand(3).getFPImm()); + } + MII = MBB->erase(MI); + --MII; + break; } - MII = MBB->erase(MI); - --MII; - } else if (Opc == Hexagon::TFR_condset_ii) { - int DestReg = MI->getOperand(0).getReg(); - int SrcReg1 = MI->getOperand(1).getReg(); - int Immed1 = MI->getOperand(2).getImm(); - int Immed2 = MI->getOperand(3).getImm(); - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cPt), - DestReg).addReg(SrcReg1).addImm(Immed1); - BuildMI(*MBB, MII, MI->getDebugLoc(), TII->get(Hexagon::TFRI_cNotPt), - DestReg).addReg(SrcReg1).addImm(Immed2); - MII = MBB->erase(MI); - --MII; } } } - return true; } diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp index 654d336..5d087db 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -13,6 +13,7 @@ #include "HexagonSubtarget.h" #include "Hexagon.h" +#include "HexagonRegisterInfo.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ErrorHandling.h" using namespace llvm; @@ -29,11 +30,17 @@ static cl::opt<bool> EnableMemOps( "enable-hexagon-memops", cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, - cl::desc("Generate V4 MEMOP in code generation for Hexagon target")); + cl::desc("Generate V4 memop instructions.")); + +static cl::opt<bool> +EnableIEEERndNear( + "enable-hexagon-ieee-rnd-near", + cl::Hidden, cl::ZeroOrMore, cl::init(false), + cl::desc("Generate non-chopped conversion from fp to int.")); HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS): HexagonGenSubtargetInfo(TT, CPU, FS), - HexagonArchVersion(V1), + HexagonArchVersion(V2), CPUString(CPU.str()) { ParseSubtargetFeatures(CPU, FS); @@ -45,18 +52,27 @@ HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS): break; case HexagonSubtarget::V4: break; + case HexagonSubtarget::V5: + break; default: - llvm_unreachable("Unknown Architecture Version."); + // If the programmer has not specified a 
Hexagon version, default + // to -mv4. + CPUString = "hexagonv4"; + HexagonArchVersion = HexagonSubtarget::V4; + break; } // Initialize scheduling itinerary for the specified CPU. InstrItins = getInstrItineraryForCPU(CPUString); - // Max issue per cycle == bundle width. - InstrItins.IssueWidth = 4; - if (EnableMemOps) UseMemOps = true; else UseMemOps = false; + + if (EnableIEEERndNear) + ModeIEEERndNear = true; + else + ModeIEEERndNear = false; } + diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h index 3079086..5d9d6d8 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.h +++ b/lib/Target/Hexagon/HexagonSubtarget.h @@ -22,16 +22,18 @@ #include "HexagonGenSubtargetInfo.inc" #define Hexagon_SMALL_DATA_THRESHOLD 8 +#define Hexagon_SLOTS 4 namespace llvm { class HexagonSubtarget : public HexagonGenSubtargetInfo { bool UseMemOps; + bool ModeIEEERndNear; public: enum HexagonArchEnum { - V1, V2, V3, V4 + V1, V2, V3, V4, V5 }; HexagonArchEnum HexagonArchVersion; @@ -55,7 +57,11 @@ public: bool hasV3TOps () const { return HexagonArchVersion >= V3; } bool hasV3TOpsOnly () const { return HexagonArchVersion == V3; } bool hasV4TOps () const { return HexagonArchVersion >= V4; } + bool hasV4TOpsOnly () const { return HexagonArchVersion == V4; } bool useMemOps () const { return HexagonArchVersion >= V4 && UseMemOps; } + bool hasV5TOps () const { return HexagonArchVersion >= V5; } + bool hasV5TOpsOnly () const { return HexagonArchVersion == V5; } + bool modeIEEERndNear () const { return ModeIEEERndNear; } bool isSubtargetV2() const { return HexagonArchVersion == V2;} const std::string &getCPUString () const { return CPUString; } diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp index 55bbba7..a7b291f 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -55,7 +55,9 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef 
TT, CodeModel::Model CM, CodeGenOpt::Level OL) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), - DataLayout("e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-a0:0") , + DataLayout("e-p:32:32:32-" + "i64:64:64-i32:32:32-i16:16:16-i1:32:32-" + "f64:64:64-f32:32:32-a0:0-n32") , Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this), TSInfo(*this), FrameLowering(Subtarget), @@ -100,43 +102,47 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) { } bool HexagonPassConfig::addInstSelector() { - PM->add(createHexagonRemoveExtendOps(getHexagonTargetMachine())); - PM->add(createHexagonISelDag(getHexagonTargetMachine())); - PM->add(createHexagonPeephole()); + addPass(createHexagonRemoveExtendOps(getHexagonTargetMachine())); + addPass(createHexagonISelDag(getHexagonTargetMachine())); + addPass(createHexagonPeephole()); return false; } bool HexagonPassConfig::addPreRegAlloc() { if (!DisableHardwareLoops) { - PM->add(createHexagonHardwareLoops()); + addPass(createHexagonHardwareLoops()); } - return false; } bool HexagonPassConfig::addPostRegAlloc() { - PM->add(createHexagonCFGOptimizer(getHexagonTargetMachine())); + addPass(createHexagonCFGOptimizer(getHexagonTargetMachine())); return true; } bool HexagonPassConfig::addPreSched2() { - addPass(IfConverterID); + addPass(&IfConverterID); return true; } bool HexagonPassConfig::addPreEmitPass() { if (!DisableHardwareLoops) { - PM->add(createHexagonFixupHwLoops()); + addPass(createHexagonFixupHwLoops()); } + addPass(createHexagonNewValueJump()); + // Expand Spill code for predicate registers. - PM->add(createHexagonExpandPredSpillCode(getHexagonTargetMachine())); + addPass(createHexagonExpandPredSpillCode(getHexagonTargetMachine())); // Split up TFRcondsets into conditional transfers. - PM->add(createHexagonSplitTFRCondSets(getHexagonTargetMachine())); + addPass(createHexagonSplitTFRCondSets(getHexagonTargetMachine())); + + // Create Packets. 
+ addPass(createHexagonPacketizer()); return false; } diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp new file mode 100644 index 0000000..a03ed03 --- /dev/null +++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -0,0 +1,3646 @@ +//===----- HexagonPacketizer.cpp - vliw packetizer ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This implements a simple VLIW packetizer using DFA. The packetizer works on +// machine basic blocks. For each instruction I in BB, the packetizer consults +// the DFA to see if machine resources are available to execute I. If so, the +// packetizer checks if I depends on any instruction J in the current packet. +// If no dependency is found, I is added to current packet and machine resource +// is marked as taken. If any dependency is found, a target API call is made to +// prune the dependence. 
+// +//===----------------------------------------------------------------------===// +#define DEBUG_TYPE "packets" +#include "llvm/CodeGen/DFAPacketizer.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/ScheduleDAG.h" +#include "llvm/CodeGen/ScheduleDAGInstrs.h" +#include "llvm/CodeGen/LatencyPriorityQueue.h" +#include "llvm/CodeGen/SchedulerRegistry.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/MachineFunctionAnalysis.h" +#include "llvm/CodeGen/ScheduleHazardRecognizer.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/MC/MCInstrItineraries.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "Hexagon.h" +#include "HexagonTargetMachine.h" +#include "HexagonRegisterInfo.h" +#include "HexagonSubtarget.h" +#include "HexagonMachineFunctionInfo.h" + +#include <map> + +using namespace llvm; + +namespace { + class HexagonPacketizer : public MachineFunctionPass { + + public: + static char ID; + HexagonPacketizer() : MachineFunctionPass(ID) {} + + void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesCFG(); + AU.addRequired<MachineDominatorTree>(); + AU.addPreserved<MachineDominatorTree>(); + AU.addRequired<MachineLoopInfo>(); + AU.addPreserved<MachineLoopInfo>(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + const char *getPassName() const { + return "Hexagon Packetizer"; + } + + bool runOnMachineFunction(MachineFunction &Fn); + }; + char HexagonPacketizer::ID = 0; + + class HexagonPacketizerList : public VLIWPacketizerList { + + 
private: + + // Has the instruction been promoted to a dot-new instruction. + bool PromotedToDotNew; + + // Has the instruction been glued to allocframe. + bool GlueAllocframeStore; + + // Has the feeder instruction been glued to new value jump. + bool GlueToNewValueJump; + + // Check if there is a dependence between some instruction already in this + // packet and this instruction. + bool Dependence; + + // Only check for dependence if there are resources available to + // schedule this instruction. + bool FoundSequentialDependence; + + public: + // Ctor. + HexagonPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI, + MachineDominatorTree &MDT); + + // initPacketizerState - initialize some internal flags. + void initPacketizerState(); + + // ignorePseudoInstruction - Ignore bundling of pseudo instructions. + bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB); + + // isSoloInstruction - return true if instruction MI can not be packetized + // with any other instruction, which means that MI itself is a packet. + bool isSoloInstruction(MachineInstr *MI); + + // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ + // together. + bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ); + + // isLegalToPruneDependencies - Is it legal to prune dependece between SUI + // and SUJ. 
+ bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ); + + MachineBasicBlock::iterator addToPacket(MachineInstr *MI); + private: + bool IsCallDependent(MachineInstr* MI, SDep::Kind DepType, unsigned DepReg); + bool PromoteToDotNew(MachineInstr* MI, SDep::Kind DepType, + MachineBasicBlock::iterator &MII, + const TargetRegisterClass* RC); + bool CanPromoteToDotNew(MachineInstr* MI, SUnit* PacketSU, + unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit, + MachineBasicBlock::iterator &MII, + const TargetRegisterClass* RC); + bool CanPromoteToNewValue(MachineInstr* MI, SUnit* PacketSU, + unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit, + MachineBasicBlock::iterator &MII); + bool CanPromoteToNewValueStore(MachineInstr* MI, MachineInstr* PacketMI, + unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit); + bool DemoteToDotOld(MachineInstr* MI); + bool ArePredicatesComplements(MachineInstr* MI1, MachineInstr* MI2, + std::map <MachineInstr*, SUnit*> MIToSUnit); + bool RestrictingDepExistInPacket(MachineInstr*, + unsigned, std::map <MachineInstr*, SUnit*>); + bool isNewifiable(MachineInstr* MI); + bool isCondInst(MachineInstr* MI); + bool IsNewifyStore (MachineInstr* MI); + bool tryAllocateResourcesForConstExt(MachineInstr* MI); + bool canReserveResourcesForConstExt(MachineInstr *MI); + void reserveResourcesForConstExt(MachineInstr* MI); + bool isNewValueInst(MachineInstr* MI); + bool isDotNewInst(MachineInstr* MI); + }; +} + +// HexagonPacketizerList Ctor. +HexagonPacketizerList::HexagonPacketizerList( + MachineFunction &MF, MachineLoopInfo &MLI,MachineDominatorTree &MDT) + : VLIWPacketizerList(MF, MLI, MDT, true){ +} + +bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) { + const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo(); + MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>(); + MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>(); + + // Instantiate the packetizer. 
+ HexagonPacketizerList Packetizer(Fn, MLI, MDT); + + // DFA state table should not be empty. + assert(Packetizer.getResourceTracker() && "Empty DFA table!"); + + // + // Loop over all basic blocks and remove KILL pseudo-instructions + // These instructions confuse the dependence analysis. Consider: + // D0 = ... (Insn 0) + // R0 = KILL R0, D0 (Insn 1) + // R0 = ... (Insn 2) + // Here, Insn 1 will result in the dependence graph not emitting an output + // dependence between Insn 0 and Insn 2. This can lead to incorrect + // packetization + // + for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); + MBB != MBBe; ++MBB) { + MachineBasicBlock::iterator End = MBB->end(); + MachineBasicBlock::iterator MI = MBB->begin(); + while (MI != End) { + if (MI->isKill()) { + MachineBasicBlock::iterator DeleteMI = MI; + ++MI; + MBB->erase(DeleteMI); + End = MBB->end(); + continue; + } + ++MI; + } + } + + // Loop over all of the basic blocks. + for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); + MBB != MBBe; ++MBB) { + // Find scheduling regions and schedule / packetize each region. + unsigned RemainingCount = MBB->size(); + for(MachineBasicBlock::iterator RegionEnd = MBB->end(); + RegionEnd != MBB->begin();) { + // The next region starts above the previous region. Look backward in the + // instruction stream until we find the nearest boundary. + MachineBasicBlock::iterator I = RegionEnd; + for(;I != MBB->begin(); --I, --RemainingCount) { + if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn)) + break; + } + I = MBB->begin(); + + // Skip empty scheduling regions. + if (I == RegionEnd) { + RegionEnd = llvm::prior(RegionEnd); + --RemainingCount; + continue; + } + // Skip regions with one instruction. 
+ if (I == llvm::prior(RegionEnd)) { + RegionEnd = llvm::prior(RegionEnd); + continue; + } + + Packetizer.PacketizeMIs(MBB, I, RegionEnd); + RegionEnd = I; + } + } + + return true; +} + + +static bool IsIndirectCall(MachineInstr* MI) { + return ((MI->getOpcode() == Hexagon::CALLR) || + (MI->getOpcode() == Hexagon::CALLRv3)); +} + +// Reserve resources for constant extender. Trigure an assertion if +// reservation fail. +void HexagonPacketizerList::reserveResourcesForConstExt(MachineInstr* MI) { + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr( + QII->get(Hexagon::IMMEXT), MI->getDebugLoc()); + + if (ResourceTracker->canReserveResources(PseudoMI)) { + ResourceTracker->reserveResources(PseudoMI); + MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI); + } else { + MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI); + llvm_unreachable("can not reserve resources for constant extender."); + } + return; +} + +bool HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) { + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + assert(QII->isExtended(MI) && + "Should only be called for constant extended instructions"); + MachineFunction *MF = MI->getParent()->getParent(); + MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT), + MI->getDebugLoc()); + bool CanReserve = ResourceTracker->canReserveResources(PseudoMI); + MF->DeleteMachineInstr(PseudoMI); + return CanReserve; +} + +// Allocate resources (i.e. 4 bytes) for constant extender. If succeed, return +// true, otherwise, return false. 
+bool HexagonPacketizerList::tryAllocateResourcesForConstExt(MachineInstr* MI) { + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr( + QII->get(Hexagon::IMMEXT), MI->getDebugLoc()); + + if (ResourceTracker->canReserveResources(PseudoMI)) { + ResourceTracker->reserveResources(PseudoMI); + MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI); + return true; + } else { + MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI); + return false; + } +} + + +bool HexagonPacketizerList::IsCallDependent(MachineInstr* MI, + SDep::Kind DepType, + unsigned DepReg) { + + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + const HexagonRegisterInfo* QRI = + (const HexagonRegisterInfo *) TM.getRegisterInfo(); + + // Check for lr dependence + if (DepReg == QRI->getRARegister()) { + return true; + } + + if (QII->isDeallocRet(MI)) { + if (DepReg == QRI->getFrameRegister() || + DepReg == QRI->getStackRegister()) + return true; + } + + // Check if this is a predicate dependence + const TargetRegisterClass* RC = QRI->getMinimalPhysRegClass(DepReg); + if (RC == &Hexagon::PredRegsRegClass) { + return true; + } + + // + // Lastly check for an operand used in an indirect call + // If we had an attribute for checking if an instruction is an indirect call, + // then we could have avoided this relatively brittle implementation of + // IsIndirectCall() + // + // Assumes that the first operand of the CALLr is the function address + // + if (IsIndirectCall(MI) && (DepType == SDep::Data)) { + MachineOperand MO = MI->getOperand(0); + if (MO.isReg() && MO.isUse() && (MO.getReg() == DepReg)) { + return true; + } + } + + return false; +} + +static bool IsRegDependence(const SDep::Kind DepType) { + return (DepType == SDep::Data || DepType == SDep::Anti || + DepType == SDep::Output); +} + +static bool IsDirectJump(MachineInstr* MI) { + return (MI->getOpcode() == Hexagon::JMP); +} + +static bool 
IsSchedBarrier(MachineInstr* MI) { + switch (MI->getOpcode()) { + case Hexagon::BARRIER: + return true; + } + return false; +} + +static bool IsControlFlow(MachineInstr* MI) { + return (MI->getDesc().isTerminator() || MI->getDesc().isCall()); +} + +bool HexagonPacketizerList::isNewValueInst(MachineInstr* MI) { + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + if (QII->isNewValueJump(MI)) + return true; + + if (QII->isNewValueStore(MI)) + return true; + + return false; +} + +// Function returns true if an instruction can be promoted to the new-value +// store. It will always return false for v2 and v3. +// It lists all the conditional and unconditional stores that can be promoted +// to the new-value stores. + +bool HexagonPacketizerList::IsNewifyStore (MachineInstr* MI) { + const HexagonRegisterInfo* QRI = + (const HexagonRegisterInfo *) TM.getRegisterInfo(); + switch (MI->getOpcode()) + { + // store byte + case Hexagon::STrib: + case Hexagon::STrib_indexed: + case Hexagon::STrib_indexed_shl_V4: + case Hexagon::STrib_shl_V4: + case Hexagon::STrib_GP_V4: + case Hexagon::STb_GP_V4: + case Hexagon::POST_STbri: + case Hexagon::STrib_cPt: + case Hexagon::STrib_cdnPt_V4: + case Hexagon::STrib_cNotPt: + case Hexagon::STrib_cdnNotPt_V4: + case Hexagon::STrib_indexed_cPt: + case Hexagon::STrib_indexed_cdnPt_V4: + case Hexagon::STrib_indexed_cNotPt: + case Hexagon::STrib_indexed_cdnNotPt_V4: + case Hexagon::STrib_indexed_shl_cPt_V4: + case Hexagon::STrib_indexed_shl_cdnPt_V4: + case Hexagon::STrib_indexed_shl_cNotPt_V4: + case Hexagon::STrib_indexed_shl_cdnNotPt_V4: + case Hexagon::POST_STbri_cPt: + case Hexagon::POST_STbri_cdnPt_V4: + case Hexagon::POST_STbri_cNotPt: + case Hexagon::POST_STbri_cdnNotPt_V4: + case Hexagon::STb_GP_cPt_V4: + case Hexagon::STb_GP_cNotPt_V4: + case Hexagon::STb_GP_cdnPt_V4: + case Hexagon::STb_GP_cdnNotPt_V4: + case Hexagon::STrib_GP_cPt_V4: + case Hexagon::STrib_GP_cNotPt_V4: + case Hexagon::STrib_GP_cdnPt_V4: + case 
Hexagon::STrib_GP_cdnNotPt_V4: + + // store halfword + case Hexagon::STrih: + case Hexagon::STrih_indexed: + case Hexagon::STrih_indexed_shl_V4: + case Hexagon::STrih_shl_V4: + case Hexagon::STrih_GP_V4: + case Hexagon::STh_GP_V4: + case Hexagon::POST_SThri: + case Hexagon::STrih_cPt: + case Hexagon::STrih_cdnPt_V4: + case Hexagon::STrih_cNotPt: + case Hexagon::STrih_cdnNotPt_V4: + case Hexagon::STrih_indexed_cPt: + case Hexagon::STrih_indexed_cdnPt_V4: + case Hexagon::STrih_indexed_cNotPt: + case Hexagon::STrih_indexed_cdnNotPt_V4: + case Hexagon::STrih_indexed_shl_cPt_V4: + case Hexagon::STrih_indexed_shl_cdnPt_V4: + case Hexagon::STrih_indexed_shl_cNotPt_V4: + case Hexagon::STrih_indexed_shl_cdnNotPt_V4: + case Hexagon::POST_SThri_cPt: + case Hexagon::POST_SThri_cdnPt_V4: + case Hexagon::POST_SThri_cNotPt: + case Hexagon::POST_SThri_cdnNotPt_V4: + case Hexagon::STh_GP_cPt_V4: + case Hexagon::STh_GP_cNotPt_V4: + case Hexagon::STh_GP_cdnPt_V4: + case Hexagon::STh_GP_cdnNotPt_V4: + case Hexagon::STrih_GP_cPt_V4: + case Hexagon::STrih_GP_cNotPt_V4: + case Hexagon::STrih_GP_cdnPt_V4: + case Hexagon::STrih_GP_cdnNotPt_V4: + + // store word + case Hexagon::STriw: + case Hexagon::STriw_indexed: + case Hexagon::STriw_indexed_shl_V4: + case Hexagon::STriw_shl_V4: + case Hexagon::STriw_GP_V4: + case Hexagon::STw_GP_V4: + case Hexagon::POST_STwri: + case Hexagon::STriw_cPt: + case Hexagon::STriw_cdnPt_V4: + case Hexagon::STriw_cNotPt: + case Hexagon::STriw_cdnNotPt_V4: + case Hexagon::STriw_indexed_cPt: + case Hexagon::STriw_indexed_cdnPt_V4: + case Hexagon::STriw_indexed_cNotPt: + case Hexagon::STriw_indexed_cdnNotPt_V4: + case Hexagon::STriw_indexed_shl_cPt_V4: + case Hexagon::STriw_indexed_shl_cdnPt_V4: + case Hexagon::STriw_indexed_shl_cNotPt_V4: + case Hexagon::STriw_indexed_shl_cdnNotPt_V4: + case Hexagon::POST_STwri_cPt: + case Hexagon::POST_STwri_cdnPt_V4: + case Hexagon::POST_STwri_cNotPt: + case Hexagon::POST_STwri_cdnNotPt_V4: + case Hexagon::STw_GP_cPt_V4: + 
case Hexagon::STw_GP_cNotPt_V4: + case Hexagon::STw_GP_cdnPt_V4: + case Hexagon::STw_GP_cdnNotPt_V4: + case Hexagon::STriw_GP_cPt_V4: + case Hexagon::STriw_GP_cNotPt_V4: + case Hexagon::STriw_GP_cdnPt_V4: + case Hexagon::STriw_GP_cdnNotPt_V4: + return QRI->Subtarget.hasV4TOps(); + } + return false; +} + +static bool IsLoopN(MachineInstr *MI) { + return (MI->getOpcode() == Hexagon::LOOP0_i || + MI->getOpcode() == Hexagon::LOOP0_r); +} + +/// DoesModifyCalleeSavedReg - Returns true if the instruction modifies a +/// callee-saved register. +static bool DoesModifyCalleeSavedReg(MachineInstr *MI, + const TargetRegisterInfo *TRI) { + for (const uint16_t *CSR = TRI->getCalleeSavedRegs(); *CSR; ++CSR) { + unsigned CalleeSavedReg = *CSR; + if (MI->modifiesRegister(CalleeSavedReg, TRI)) + return true; + } + return false; +} + +// Return the new value instruction for a given store. +static int GetDotNewOp(const int opc) { + switch (opc) { + default: llvm_unreachable("Unknown .new type"); + // store new value byte + case Hexagon::STrib: + return Hexagon::STrib_nv_V4; + + case Hexagon::STrib_indexed: + return Hexagon::STrib_indexed_nv_V4; + + case Hexagon::STrib_indexed_shl_V4: + return Hexagon::STrib_indexed_shl_nv_V4; + + case Hexagon::STrib_shl_V4: + return Hexagon::STrib_shl_nv_V4; + + case Hexagon::STrib_GP_V4: + return Hexagon::STrib_GP_nv_V4; + + case Hexagon::STb_GP_V4: + return Hexagon::STb_GP_nv_V4; + + case Hexagon::POST_STbri: + return Hexagon::POST_STbri_nv_V4; + + case Hexagon::STrib_cPt: + return Hexagon::STrib_cPt_nv_V4; + + case Hexagon::STrib_cdnPt_V4: + return Hexagon::STrib_cdnPt_nv_V4; + + case Hexagon::STrib_cNotPt: + return Hexagon::STrib_cNotPt_nv_V4; + + case Hexagon::STrib_cdnNotPt_V4: + return Hexagon::STrib_cdnNotPt_nv_V4; + + case Hexagon::STrib_indexed_cPt: + return Hexagon::STrib_indexed_cPt_nv_V4; + + case Hexagon::STrib_indexed_cdnPt_V4: + return Hexagon::STrib_indexed_cdnPt_nv_V4; + + case Hexagon::STrib_indexed_cNotPt: + return 
Hexagon::STrib_indexed_cNotPt_nv_V4; + + case Hexagon::STrib_indexed_cdnNotPt_V4: + return Hexagon::STrib_indexed_cdnNotPt_nv_V4; + + case Hexagon::STrib_indexed_shl_cPt_V4: + return Hexagon::STrib_indexed_shl_cPt_nv_V4; + + case Hexagon::STrib_indexed_shl_cdnPt_V4: + return Hexagon::STrib_indexed_shl_cdnPt_nv_V4; + + case Hexagon::STrib_indexed_shl_cNotPt_V4: + return Hexagon::STrib_indexed_shl_cNotPt_nv_V4; + + case Hexagon::STrib_indexed_shl_cdnNotPt_V4: + return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4; + + case Hexagon::POST_STbri_cPt: + return Hexagon::POST_STbri_cPt_nv_V4; + + case Hexagon::POST_STbri_cdnPt_V4: + return Hexagon::POST_STbri_cdnPt_nv_V4; + + case Hexagon::POST_STbri_cNotPt: + return Hexagon::POST_STbri_cNotPt_nv_V4; + + case Hexagon::POST_STbri_cdnNotPt_V4: + return Hexagon::POST_STbri_cdnNotPt_nv_V4; + + case Hexagon::STb_GP_cPt_V4: + return Hexagon::STb_GP_cPt_nv_V4; + + case Hexagon::STb_GP_cNotPt_V4: + return Hexagon::STb_GP_cNotPt_nv_V4; + + case Hexagon::STb_GP_cdnPt_V4: + return Hexagon::STb_GP_cdnPt_nv_V4; + + case Hexagon::STb_GP_cdnNotPt_V4: + return Hexagon::STb_GP_cdnNotPt_nv_V4; + + case Hexagon::STrib_GP_cPt_V4: + return Hexagon::STrib_GP_cPt_nv_V4; + + case Hexagon::STrib_GP_cNotPt_V4: + return Hexagon::STrib_GP_cNotPt_nv_V4; + + case Hexagon::STrib_GP_cdnPt_V4: + return Hexagon::STrib_GP_cdnPt_nv_V4; + + case Hexagon::STrib_GP_cdnNotPt_V4: + return Hexagon::STrib_GP_cdnNotPt_nv_V4; + + // store new value halfword + case Hexagon::STrih: + return Hexagon::STrih_nv_V4; + + case Hexagon::STrih_indexed: + return Hexagon::STrih_indexed_nv_V4; + + case Hexagon::STrih_indexed_shl_V4: + return Hexagon::STrih_indexed_shl_nv_V4; + + case Hexagon::STrih_shl_V4: + return Hexagon::STrih_shl_nv_V4; + + case Hexagon::STrih_GP_V4: + return Hexagon::STrih_GP_nv_V4; + + case Hexagon::STh_GP_V4: + return Hexagon::STh_GP_nv_V4; + + case Hexagon::POST_SThri: + return Hexagon::POST_SThri_nv_V4; + + case Hexagon::STrih_cPt: + return 
Hexagon::STrih_cPt_nv_V4; + + case Hexagon::STrih_cdnPt_V4: + return Hexagon::STrih_cdnPt_nv_V4; + + case Hexagon::STrih_cNotPt: + return Hexagon::STrih_cNotPt_nv_V4; + + case Hexagon::STrih_cdnNotPt_V4: + return Hexagon::STrih_cdnNotPt_nv_V4; + + case Hexagon::STrih_indexed_cPt: + return Hexagon::STrih_indexed_cPt_nv_V4; + + case Hexagon::STrih_indexed_cdnPt_V4: + return Hexagon::STrih_indexed_cdnPt_nv_V4; + + case Hexagon::STrih_indexed_cNotPt: + return Hexagon::STrih_indexed_cNotPt_nv_V4; + + case Hexagon::STrih_indexed_cdnNotPt_V4: + return Hexagon::STrih_indexed_cdnNotPt_nv_V4; + + case Hexagon::STrih_indexed_shl_cPt_V4: + return Hexagon::STrih_indexed_shl_cPt_nv_V4; + + case Hexagon::STrih_indexed_shl_cdnPt_V4: + return Hexagon::STrih_indexed_shl_cdnPt_nv_V4; + + case Hexagon::STrih_indexed_shl_cNotPt_V4: + return Hexagon::STrih_indexed_shl_cNotPt_nv_V4; + + case Hexagon::STrih_indexed_shl_cdnNotPt_V4: + return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4; + + case Hexagon::POST_SThri_cPt: + return Hexagon::POST_SThri_cPt_nv_V4; + + case Hexagon::POST_SThri_cdnPt_V4: + return Hexagon::POST_SThri_cdnPt_nv_V4; + + case Hexagon::POST_SThri_cNotPt: + return Hexagon::POST_SThri_cNotPt_nv_V4; + + case Hexagon::POST_SThri_cdnNotPt_V4: + return Hexagon::POST_SThri_cdnNotPt_nv_V4; + + case Hexagon::STh_GP_cPt_V4: + return Hexagon::STh_GP_cPt_nv_V4; + + case Hexagon::STh_GP_cNotPt_V4: + return Hexagon::STh_GP_cNotPt_nv_V4; + + case Hexagon::STh_GP_cdnPt_V4: + return Hexagon::STh_GP_cdnPt_nv_V4; + + case Hexagon::STh_GP_cdnNotPt_V4: + return Hexagon::STh_GP_cdnNotPt_nv_V4; + + case Hexagon::STrih_GP_cPt_V4: + return Hexagon::STrih_GP_cPt_nv_V4; + + case Hexagon::STrih_GP_cNotPt_V4: + return Hexagon::STrih_GP_cNotPt_nv_V4; + + case Hexagon::STrih_GP_cdnPt_V4: + return Hexagon::STrih_GP_cdnPt_nv_V4; + + case Hexagon::STrih_GP_cdnNotPt_V4: + return Hexagon::STrih_GP_cdnNotPt_nv_V4; + + // store new value word + case Hexagon::STriw: + return Hexagon::STriw_nv_V4; + + case 
Hexagon::STriw_indexed: + return Hexagon::STriw_indexed_nv_V4; + + case Hexagon::STriw_indexed_shl_V4: + return Hexagon::STriw_indexed_shl_nv_V4; + + case Hexagon::STriw_shl_V4: + return Hexagon::STriw_shl_nv_V4; + + case Hexagon::STriw_GP_V4: + return Hexagon::STriw_GP_nv_V4; + + case Hexagon::STw_GP_V4: + return Hexagon::STw_GP_nv_V4; + + case Hexagon::POST_STwri: + return Hexagon::POST_STwri_nv_V4; + + case Hexagon::STriw_cPt: + return Hexagon::STriw_cPt_nv_V4; + + case Hexagon::STriw_cdnPt_V4: + return Hexagon::STriw_cdnPt_nv_V4; + + case Hexagon::STriw_cNotPt: + return Hexagon::STriw_cNotPt_nv_V4; + + case Hexagon::STriw_cdnNotPt_V4: + return Hexagon::STriw_cdnNotPt_nv_V4; + + case Hexagon::STriw_indexed_cPt: + return Hexagon::STriw_indexed_cPt_nv_V4; + + case Hexagon::STriw_indexed_cdnPt_V4: + return Hexagon::STriw_indexed_cdnPt_nv_V4; + + case Hexagon::STriw_indexed_cNotPt: + return Hexagon::STriw_indexed_cNotPt_nv_V4; + + case Hexagon::STriw_indexed_cdnNotPt_V4: + return Hexagon::STriw_indexed_cdnNotPt_nv_V4; + + case Hexagon::STriw_indexed_shl_cPt_V4: + return Hexagon::STriw_indexed_shl_cPt_nv_V4; + + case Hexagon::STriw_indexed_shl_cdnPt_V4: + return Hexagon::STriw_indexed_shl_cdnPt_nv_V4; + + case Hexagon::STriw_indexed_shl_cNotPt_V4: + return Hexagon::STriw_indexed_shl_cNotPt_nv_V4; + + case Hexagon::STriw_indexed_shl_cdnNotPt_V4: + return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4; + + case Hexagon::POST_STwri_cPt: + return Hexagon::POST_STwri_cPt_nv_V4; + + case Hexagon::POST_STwri_cdnPt_V4: + return Hexagon::POST_STwri_cdnPt_nv_V4; + + case Hexagon::POST_STwri_cNotPt: + return Hexagon::POST_STwri_cNotPt_nv_V4; + + case Hexagon::POST_STwri_cdnNotPt_V4: + return Hexagon::POST_STwri_cdnNotPt_nv_V4; + + case Hexagon::STw_GP_cPt_V4: + return Hexagon::STw_GP_cPt_nv_V4; + + case Hexagon::STw_GP_cNotPt_V4: + return Hexagon::STw_GP_cNotPt_nv_V4; + + case Hexagon::STw_GP_cdnPt_V4: + return Hexagon::STw_GP_cdnPt_nv_V4; + + case Hexagon::STw_GP_cdnNotPt_V4: + 
return Hexagon::STw_GP_cdnNotPt_nv_V4; + + case Hexagon::STriw_GP_cPt_V4: + return Hexagon::STriw_GP_cPt_nv_V4; + + case Hexagon::STriw_GP_cNotPt_V4: + return Hexagon::STriw_GP_cNotPt_nv_V4; + + case Hexagon::STriw_GP_cdnPt_V4: + return Hexagon::STriw_GP_cdnPt_nv_V4; + + case Hexagon::STriw_GP_cdnNotPt_V4: + return Hexagon::STriw_GP_cdnNotPt_nv_V4; + } +} + +// Return .new predicate version for an instruction +static int GetDotNewPredOp(const int opc) { + switch (opc) { + default: llvm_unreachable("Unknown .new type"); + // Conditional stores + // Store byte conditionally + case Hexagon::STrib_cPt : + return Hexagon::STrib_cdnPt_V4; + + case Hexagon::STrib_cNotPt : + return Hexagon::STrib_cdnNotPt_V4; + + case Hexagon::STrib_indexed_cPt : + return Hexagon::STrib_indexed_cdnPt_V4; + + case Hexagon::STrib_indexed_cNotPt : + return Hexagon::STrib_indexed_cdnNotPt_V4; + + case Hexagon::STrib_imm_cPt_V4 : + return Hexagon::STrib_imm_cdnPt_V4; + + case Hexagon::STrib_imm_cNotPt_V4 : + return Hexagon::STrib_imm_cdnNotPt_V4; + + case Hexagon::POST_STbri_cPt : + return Hexagon::POST_STbri_cdnPt_V4; + + case Hexagon::POST_STbri_cNotPt : + return Hexagon::POST_STbri_cdnNotPt_V4; + + case Hexagon::STrib_indexed_shl_cPt_V4 : + return Hexagon::STrib_indexed_shl_cdnPt_V4; + + case Hexagon::STrib_indexed_shl_cNotPt_V4 : + return Hexagon::STrib_indexed_shl_cdnNotPt_V4; + + case Hexagon::STb_GP_cPt_V4 : + return Hexagon::STb_GP_cdnPt_V4; + + case Hexagon::STb_GP_cNotPt_V4 : + return Hexagon::STb_GP_cdnNotPt_V4; + + case Hexagon::STrib_GP_cPt_V4 : + return Hexagon::STrib_GP_cdnPt_V4; + + case Hexagon::STrib_GP_cNotPt_V4 : + return Hexagon::STrib_GP_cdnNotPt_V4; + + // Store doubleword conditionally + case Hexagon::STrid_cPt : + return Hexagon::STrid_cdnPt_V4; + + case Hexagon::STrid_cNotPt : + return Hexagon::STrid_cdnNotPt_V4; + + case Hexagon::STrid_indexed_cPt : + return Hexagon::STrid_indexed_cdnPt_V4; + + case Hexagon::STrid_indexed_cNotPt : + return 
Hexagon::STrid_indexed_cdnNotPt_V4; + + case Hexagon::STrid_indexed_shl_cPt_V4 : + return Hexagon::STrid_indexed_shl_cdnPt_V4; + + case Hexagon::STrid_indexed_shl_cNotPt_V4 : + return Hexagon::STrid_indexed_shl_cdnNotPt_V4; + + case Hexagon::POST_STdri_cPt : + return Hexagon::POST_STdri_cdnPt_V4; + + case Hexagon::POST_STdri_cNotPt : + return Hexagon::POST_STdri_cdnNotPt_V4; + + case Hexagon::STd_GP_cPt_V4 : + return Hexagon::STd_GP_cdnPt_V4; + + case Hexagon::STd_GP_cNotPt_V4 : + return Hexagon::STd_GP_cdnNotPt_V4; + + case Hexagon::STrid_GP_cPt_V4 : + return Hexagon::STrid_GP_cdnPt_V4; + + case Hexagon::STrid_GP_cNotPt_V4 : + return Hexagon::STrid_GP_cdnNotPt_V4; + + // Store halfword conditionally + case Hexagon::STrih_cPt : + return Hexagon::STrih_cdnPt_V4; + + case Hexagon::STrih_cNotPt : + return Hexagon::STrih_cdnNotPt_V4; + + case Hexagon::STrih_indexed_cPt : + return Hexagon::STrih_indexed_cdnPt_V4; + + case Hexagon::STrih_indexed_cNotPt : + return Hexagon::STrih_indexed_cdnNotPt_V4; + + case Hexagon::STrih_imm_cPt_V4 : + return Hexagon::STrih_imm_cdnPt_V4; + + case Hexagon::STrih_imm_cNotPt_V4 : + return Hexagon::STrih_imm_cdnNotPt_V4; + + case Hexagon::STrih_indexed_shl_cPt_V4 : + return Hexagon::STrih_indexed_shl_cdnPt_V4; + + case Hexagon::STrih_indexed_shl_cNotPt_V4 : + return Hexagon::STrih_indexed_shl_cdnNotPt_V4; + + case Hexagon::POST_SThri_cPt : + return Hexagon::POST_SThri_cdnPt_V4; + + case Hexagon::POST_SThri_cNotPt : + return Hexagon::POST_SThri_cdnNotPt_V4; + + case Hexagon::STh_GP_cPt_V4 : + return Hexagon::STh_GP_cdnPt_V4; + + case Hexagon::STh_GP_cNotPt_V4 : + return Hexagon::STh_GP_cdnNotPt_V4; + + case Hexagon::STrih_GP_cPt_V4 : + return Hexagon::STrih_GP_cdnPt_V4; + + case Hexagon::STrih_GP_cNotPt_V4 : + return Hexagon::STrih_GP_cdnNotPt_V4; + + // Store word conditionally + case Hexagon::STriw_cPt : + return Hexagon::STriw_cdnPt_V4; + + case Hexagon::STriw_cNotPt : + return Hexagon::STriw_cdnNotPt_V4; + + case 
Hexagon::STriw_indexed_cPt : + return Hexagon::STriw_indexed_cdnPt_V4; + + case Hexagon::STriw_indexed_cNotPt : + return Hexagon::STriw_indexed_cdnNotPt_V4; + + case Hexagon::STriw_imm_cPt_V4 : + return Hexagon::STriw_imm_cdnPt_V4; + + case Hexagon::STriw_imm_cNotPt_V4 : + return Hexagon::STriw_imm_cdnNotPt_V4; + + case Hexagon::STriw_indexed_shl_cPt_V4 : + return Hexagon::STriw_indexed_shl_cdnPt_V4; + + case Hexagon::STriw_indexed_shl_cNotPt_V4 : + return Hexagon::STriw_indexed_shl_cdnNotPt_V4; + + case Hexagon::POST_STwri_cPt : + return Hexagon::POST_STwri_cdnPt_V4; + + case Hexagon::POST_STwri_cNotPt : + return Hexagon::POST_STwri_cdnNotPt_V4; + + case Hexagon::STw_GP_cPt_V4 : + return Hexagon::STw_GP_cdnPt_V4; + + case Hexagon::STw_GP_cNotPt_V4 : + return Hexagon::STw_GP_cdnNotPt_V4; + + case Hexagon::STriw_GP_cPt_V4 : + return Hexagon::STriw_GP_cdnPt_V4; + + case Hexagon::STriw_GP_cNotPt_V4 : + return Hexagon::STriw_GP_cdnNotPt_V4; + + // Condtional Jumps + case Hexagon::JMP_c: + return Hexagon::JMP_cdnPt; + + case Hexagon::JMP_cNot: + return Hexagon::JMP_cdnNotPt; + + case Hexagon::JMPR_cPt: + return Hexagon::JMPR_cdnPt_V3; + + case Hexagon::JMPR_cNotPt: + return Hexagon::JMPR_cdnNotPt_V3; + + // Conditional Transfers + case Hexagon::TFR_cPt: + return Hexagon::TFR_cdnPt; + + case Hexagon::TFR_cNotPt: + return Hexagon::TFR_cdnNotPt; + + case Hexagon::TFRI_cPt: + return Hexagon::TFRI_cdnPt; + + case Hexagon::TFRI_cNotPt: + return Hexagon::TFRI_cdnNotPt; + + // Load double word + case Hexagon::LDrid_cPt : + return Hexagon::LDrid_cdnPt; + + case Hexagon::LDrid_cNotPt : + return Hexagon::LDrid_cdnNotPt; + + case Hexagon::LDrid_indexed_cPt : + return Hexagon::LDrid_indexed_cdnPt; + + case Hexagon::LDrid_indexed_cNotPt : + return Hexagon::LDrid_indexed_cdnNotPt; + + case Hexagon::POST_LDrid_cPt : + return Hexagon::POST_LDrid_cdnPt_V4; + + case Hexagon::POST_LDrid_cNotPt : + return Hexagon::POST_LDrid_cdnNotPt_V4; + + // Load word + case Hexagon::LDriw_cPt : + return 
Hexagon::LDriw_cdnPt; + + case Hexagon::LDriw_cNotPt : + return Hexagon::LDriw_cdnNotPt; + + case Hexagon::LDriw_indexed_cPt : + return Hexagon::LDriw_indexed_cdnPt; + + case Hexagon::LDriw_indexed_cNotPt : + return Hexagon::LDriw_indexed_cdnNotPt; + + case Hexagon::POST_LDriw_cPt : + return Hexagon::POST_LDriw_cdnPt_V4; + + case Hexagon::POST_LDriw_cNotPt : + return Hexagon::POST_LDriw_cdnNotPt_V4; + + // Load halfword + case Hexagon::LDrih_cPt : + return Hexagon::LDrih_cdnPt; + + case Hexagon::LDrih_cNotPt : + return Hexagon::LDrih_cdnNotPt; + + case Hexagon::LDrih_indexed_cPt : + return Hexagon::LDrih_indexed_cdnPt; + + case Hexagon::LDrih_indexed_cNotPt : + return Hexagon::LDrih_indexed_cdnNotPt; + + case Hexagon::POST_LDrih_cPt : + return Hexagon::POST_LDrih_cdnPt_V4; + + case Hexagon::POST_LDrih_cNotPt : + return Hexagon::POST_LDrih_cdnNotPt_V4; + + // Load byte + case Hexagon::LDrib_cPt : + return Hexagon::LDrib_cdnPt; + + case Hexagon::LDrib_cNotPt : + return Hexagon::LDrib_cdnNotPt; + + case Hexagon::LDrib_indexed_cPt : + return Hexagon::LDrib_indexed_cdnPt; + + case Hexagon::LDrib_indexed_cNotPt : + return Hexagon::LDrib_indexed_cdnNotPt; + + case Hexagon::POST_LDrib_cPt : + return Hexagon::POST_LDrib_cdnPt_V4; + + case Hexagon::POST_LDrib_cNotPt : + return Hexagon::POST_LDrib_cdnNotPt_V4; + + // Load unsigned halfword + case Hexagon::LDriuh_cPt : + return Hexagon::LDriuh_cdnPt; + + case Hexagon::LDriuh_cNotPt : + return Hexagon::LDriuh_cdnNotPt; + + case Hexagon::LDriuh_indexed_cPt : + return Hexagon::LDriuh_indexed_cdnPt; + + case Hexagon::LDriuh_indexed_cNotPt : + return Hexagon::LDriuh_indexed_cdnNotPt; + + case Hexagon::POST_LDriuh_cPt : + return Hexagon::POST_LDriuh_cdnPt_V4; + + case Hexagon::POST_LDriuh_cNotPt : + return Hexagon::POST_LDriuh_cdnNotPt_V4; + + // Load unsigned byte + case Hexagon::LDriub_cPt : + return Hexagon::LDriub_cdnPt; + + case Hexagon::LDriub_cNotPt : + return Hexagon::LDriub_cdnNotPt; + + case Hexagon::LDriub_indexed_cPt : + 
return Hexagon::LDriub_indexed_cdnPt; + + case Hexagon::LDriub_indexed_cNotPt : + return Hexagon::LDriub_indexed_cdnNotPt; + + case Hexagon::POST_LDriub_cPt : + return Hexagon::POST_LDriub_cdnPt_V4; + + case Hexagon::POST_LDriub_cNotPt : + return Hexagon::POST_LDriub_cdnNotPt_V4; + + // V4 indexed+scaled load + + case Hexagon::LDrid_indexed_cPt_V4 : + return Hexagon::LDrid_indexed_cdnPt_V4; + + case Hexagon::LDrid_indexed_cNotPt_V4 : + return Hexagon::LDrid_indexed_cdnNotPt_V4; + + case Hexagon::LDrid_indexed_shl_cPt_V4 : + return Hexagon::LDrid_indexed_shl_cdnPt_V4; + + case Hexagon::LDrid_indexed_shl_cNotPt_V4 : + return Hexagon::LDrid_indexed_shl_cdnNotPt_V4; + + case Hexagon::LDrib_indexed_cPt_V4 : + return Hexagon::LDrib_indexed_cdnPt_V4; + + case Hexagon::LDrib_indexed_cNotPt_V4 : + return Hexagon::LDrib_indexed_cdnNotPt_V4; + + case Hexagon::LDrib_indexed_shl_cPt_V4 : + return Hexagon::LDrib_indexed_shl_cdnPt_V4; + + case Hexagon::LDrib_indexed_shl_cNotPt_V4 : + return Hexagon::LDrib_indexed_shl_cdnNotPt_V4; + + case Hexagon::LDriub_indexed_cPt_V4 : + return Hexagon::LDriub_indexed_cdnPt_V4; + + case Hexagon::LDriub_indexed_cNotPt_V4 : + return Hexagon::LDriub_indexed_cdnNotPt_V4; + + case Hexagon::LDriub_indexed_shl_cPt_V4 : + return Hexagon::LDriub_indexed_shl_cdnPt_V4; + + case Hexagon::LDriub_indexed_shl_cNotPt_V4 : + return Hexagon::LDriub_indexed_shl_cdnNotPt_V4; + + case Hexagon::LDrih_indexed_cPt_V4 : + return Hexagon::LDrih_indexed_cdnPt_V4; + + case Hexagon::LDrih_indexed_cNotPt_V4 : + return Hexagon::LDrih_indexed_cdnNotPt_V4; + + case Hexagon::LDrih_indexed_shl_cPt_V4 : + return Hexagon::LDrih_indexed_shl_cdnPt_V4; + + case Hexagon::LDrih_indexed_shl_cNotPt_V4 : + return Hexagon::LDrih_indexed_shl_cdnNotPt_V4; + + case Hexagon::LDriuh_indexed_cPt_V4 : + return Hexagon::LDriuh_indexed_cdnPt_V4; + + case Hexagon::LDriuh_indexed_cNotPt_V4 : + return Hexagon::LDriuh_indexed_cdnNotPt_V4; + + case Hexagon::LDriuh_indexed_shl_cPt_V4 : + return 
Hexagon::LDriuh_indexed_shl_cdnPt_V4; + + case Hexagon::LDriuh_indexed_shl_cNotPt_V4 : + return Hexagon::LDriuh_indexed_shl_cdnNotPt_V4; + + case Hexagon::LDriw_indexed_cPt_V4 : + return Hexagon::LDriw_indexed_cdnPt_V4; + + case Hexagon::LDriw_indexed_cNotPt_V4 : + return Hexagon::LDriw_indexed_cdnNotPt_V4; + + case Hexagon::LDriw_indexed_shl_cPt_V4 : + return Hexagon::LDriw_indexed_shl_cdnPt_V4; + + case Hexagon::LDriw_indexed_shl_cNotPt_V4 : + return Hexagon::LDriw_indexed_shl_cdnNotPt_V4; + + // V4 global address load + + case Hexagon::LDd_GP_cPt_V4: + return Hexagon::LDd_GP_cdnPt_V4; + + case Hexagon::LDd_GP_cNotPt_V4: + return Hexagon::LDd_GP_cdnNotPt_V4; + + case Hexagon::LDb_GP_cPt_V4: + return Hexagon::LDb_GP_cdnPt_V4; + + case Hexagon::LDb_GP_cNotPt_V4: + return Hexagon::LDb_GP_cdnNotPt_V4; + + case Hexagon::LDub_GP_cPt_V4: + return Hexagon::LDub_GP_cdnPt_V4; + + case Hexagon::LDub_GP_cNotPt_V4: + return Hexagon::LDub_GP_cdnNotPt_V4; + + case Hexagon::LDh_GP_cPt_V4: + return Hexagon::LDh_GP_cdnPt_V4; + + case Hexagon::LDh_GP_cNotPt_V4: + return Hexagon::LDh_GP_cdnNotPt_V4; + + case Hexagon::LDuh_GP_cPt_V4: + return Hexagon::LDuh_GP_cdnPt_V4; + + case Hexagon::LDuh_GP_cNotPt_V4: + return Hexagon::LDuh_GP_cdnNotPt_V4; + + case Hexagon::LDw_GP_cPt_V4: + return Hexagon::LDw_GP_cdnPt_V4; + + case Hexagon::LDw_GP_cNotPt_V4: + return Hexagon::LDw_GP_cdnNotPt_V4; + + case Hexagon::LDrid_GP_cPt_V4: + return Hexagon::LDrid_GP_cdnPt_V4; + + case Hexagon::LDrid_GP_cNotPt_V4: + return Hexagon::LDrid_GP_cdnNotPt_V4; + + case Hexagon::LDrib_GP_cPt_V4: + return Hexagon::LDrib_GP_cdnPt_V4; + + case Hexagon::LDrib_GP_cNotPt_V4: + return Hexagon::LDrib_GP_cdnNotPt_V4; + + case Hexagon::LDriub_GP_cPt_V4: + return Hexagon::LDriub_GP_cdnPt_V4; + + case Hexagon::LDriub_GP_cNotPt_V4: + return Hexagon::LDriub_GP_cdnNotPt_V4; + + case Hexagon::LDrih_GP_cPt_V4: + return Hexagon::LDrih_GP_cdnPt_V4; + + case Hexagon::LDrih_GP_cNotPt_V4: + return Hexagon::LDrih_GP_cdnNotPt_V4; + + case 
Hexagon::LDriuh_GP_cPt_V4: + return Hexagon::LDriuh_GP_cdnPt_V4; + + case Hexagon::LDriuh_GP_cNotPt_V4: + return Hexagon::LDriuh_GP_cdnNotPt_V4; + + case Hexagon::LDriw_GP_cPt_V4: + return Hexagon::LDriw_GP_cdnPt_V4; + + case Hexagon::LDriw_GP_cNotPt_V4: + return Hexagon::LDriw_GP_cdnNotPt_V4; + + // Conditional store new-value byte + case Hexagon::STrib_cPt_nv_V4 : + return Hexagon::STrib_cdnPt_nv_V4; + case Hexagon::STrib_cNotPt_nv_V4 : + return Hexagon::STrib_cdnNotPt_nv_V4; + + case Hexagon::STrib_indexed_cPt_nv_V4 : + return Hexagon::STrib_indexed_cdnPt_nv_V4; + case Hexagon::STrib_indexed_cNotPt_nv_V4 : + return Hexagon::STrib_indexed_cdnNotPt_nv_V4; + + case Hexagon::STrib_indexed_shl_cPt_nv_V4 : + return Hexagon::STrib_indexed_shl_cdnPt_nv_V4; + case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 : + return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4; + + case Hexagon::POST_STbri_cPt_nv_V4 : + return Hexagon::POST_STbri_cdnPt_nv_V4; + case Hexagon::POST_STbri_cNotPt_nv_V4 : + return Hexagon::POST_STbri_cdnNotPt_nv_V4; + + case Hexagon::STb_GP_cPt_nv_V4 : + return Hexagon::STb_GP_cdnPt_nv_V4; + + case Hexagon::STb_GP_cNotPt_nv_V4 : + return Hexagon::STb_GP_cdnNotPt_nv_V4; + + case Hexagon::STrib_GP_cPt_nv_V4 : + return Hexagon::STrib_GP_cdnPt_nv_V4; + + case Hexagon::STrib_GP_cNotPt_nv_V4 : + return Hexagon::STrib_GP_cdnNotPt_nv_V4; + + // Conditional store new-value halfword + case Hexagon::STrih_cPt_nv_V4 : + return Hexagon::STrih_cdnPt_nv_V4; + case Hexagon::STrih_cNotPt_nv_V4 : + return Hexagon::STrih_cdnNotPt_nv_V4; + + case Hexagon::STrih_indexed_cPt_nv_V4 : + return Hexagon::STrih_indexed_cdnPt_nv_V4; + case Hexagon::STrih_indexed_cNotPt_nv_V4 : + return Hexagon::STrih_indexed_cdnNotPt_nv_V4; + + case Hexagon::STrih_indexed_shl_cPt_nv_V4 : + return Hexagon::STrih_indexed_shl_cdnPt_nv_V4; + case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 : + return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4; + + case Hexagon::POST_SThri_cPt_nv_V4 : + return 
Hexagon::POST_SThri_cdnPt_nv_V4; + case Hexagon::POST_SThri_cNotPt_nv_V4 : + return Hexagon::POST_SThri_cdnNotPt_nv_V4; + + case Hexagon::STh_GP_cPt_nv_V4 : + return Hexagon::STh_GP_cdnPt_nv_V4; + + case Hexagon::STh_GP_cNotPt_nv_V4 : + return Hexagon::STh_GP_cdnNotPt_nv_V4; + + case Hexagon::STrih_GP_cPt_nv_V4 : + return Hexagon::STrih_GP_cdnPt_nv_V4; + + case Hexagon::STrih_GP_cNotPt_nv_V4 : + return Hexagon::STrih_GP_cdnNotPt_nv_V4; + + // Conditional store new-value word + case Hexagon::STriw_cPt_nv_V4 : + return Hexagon::STriw_cdnPt_nv_V4; + case Hexagon::STriw_cNotPt_nv_V4 : + return Hexagon::STriw_cdnNotPt_nv_V4; + + case Hexagon::STriw_indexed_cPt_nv_V4 : + return Hexagon::STriw_indexed_cdnPt_nv_V4; + case Hexagon::STriw_indexed_cNotPt_nv_V4 : + return Hexagon::STriw_indexed_cdnNotPt_nv_V4; + + case Hexagon::STriw_indexed_shl_cPt_nv_V4 : + return Hexagon::STriw_indexed_shl_cdnPt_nv_V4; + case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 : + return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4; + + case Hexagon::POST_STwri_cPt_nv_V4 : + return Hexagon::POST_STwri_cdnPt_nv_V4; + case Hexagon::POST_STwri_cNotPt_nv_V4: + return Hexagon::POST_STwri_cdnNotPt_nv_V4; + + case Hexagon::STw_GP_cPt_nv_V4 : + return Hexagon::STw_GP_cdnPt_nv_V4; + + case Hexagon::STw_GP_cNotPt_nv_V4 : + return Hexagon::STw_GP_cdnNotPt_nv_V4; + + case Hexagon::STriw_GP_cPt_nv_V4 : + return Hexagon::STriw_GP_cdnPt_nv_V4; + + case Hexagon::STriw_GP_cNotPt_nv_V4 : + return Hexagon::STriw_GP_cdnNotPt_nv_V4; + + // Conditional add + case Hexagon::ADD_ri_cPt : + return Hexagon::ADD_ri_cdnPt; + case Hexagon::ADD_ri_cNotPt : + return Hexagon::ADD_ri_cdnNotPt; + + case Hexagon::ADD_rr_cPt : + return Hexagon::ADD_rr_cdnPt; + case Hexagon::ADD_rr_cNotPt : + return Hexagon::ADD_rr_cdnNotPt; + + // Conditional logical Operations + case Hexagon::XOR_rr_cPt : + return Hexagon::XOR_rr_cdnPt; + case Hexagon::XOR_rr_cNotPt : + return Hexagon::XOR_rr_cdnNotPt; + + case Hexagon::AND_rr_cPt : + return 
Hexagon::AND_rr_cdnPt; + case Hexagon::AND_rr_cNotPt : + return Hexagon::AND_rr_cdnNotPt; + + case Hexagon::OR_rr_cPt : + return Hexagon::OR_rr_cdnPt; + case Hexagon::OR_rr_cNotPt : + return Hexagon::OR_rr_cdnNotPt; + + // Conditional Subtract + case Hexagon::SUB_rr_cPt : + return Hexagon::SUB_rr_cdnPt; + case Hexagon::SUB_rr_cNotPt : + return Hexagon::SUB_rr_cdnNotPt; + + // Conditional combine + case Hexagon::COMBINE_rr_cPt : + return Hexagon::COMBINE_rr_cdnPt; + case Hexagon::COMBINE_rr_cNotPt : + return Hexagon::COMBINE_rr_cdnNotPt; + + case Hexagon::ASLH_cPt_V4 : + return Hexagon::ASLH_cdnPt_V4; + case Hexagon::ASLH_cNotPt_V4 : + return Hexagon::ASLH_cdnNotPt_V4; + + case Hexagon::ASRH_cPt_V4 : + return Hexagon::ASRH_cdnPt_V4; + case Hexagon::ASRH_cNotPt_V4 : + return Hexagon::ASRH_cdnNotPt_V4; + + case Hexagon::SXTB_cPt_V4 : + return Hexagon::SXTB_cdnPt_V4; + case Hexagon::SXTB_cNotPt_V4 : + return Hexagon::SXTB_cdnNotPt_V4; + + case Hexagon::SXTH_cPt_V4 : + return Hexagon::SXTH_cdnPt_V4; + case Hexagon::SXTH_cNotPt_V4 : + return Hexagon::SXTH_cdnNotPt_V4; + + case Hexagon::ZXTB_cPt_V4 : + return Hexagon::ZXTB_cdnPt_V4; + case Hexagon::ZXTB_cNotPt_V4 : + return Hexagon::ZXTB_cdnNotPt_V4; + + case Hexagon::ZXTH_cPt_V4 : + return Hexagon::ZXTH_cdnPt_V4; + case Hexagon::ZXTH_cNotPt_V4 : + return Hexagon::ZXTH_cdnNotPt_V4; + } +} + +// Returns true if an instruction can be promoted to .new predicate +// or new-value store. +bool HexagonPacketizerList::isNewifiable(MachineInstr* MI) { + if ( isCondInst(MI) || IsNewifyStore(MI)) + return true; + else + return false; +} + +bool HexagonPacketizerList::isCondInst (MachineInstr* MI) { + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + const MCInstrDesc& TID = MI->getDesc(); + // bug 5670: until that is fixed, + // this portion is disabled. 
+ if ( TID.isConditionalBranch() // && !IsRegisterJump(MI)) || + || QII->isConditionalTransfer(MI) + || QII->isConditionalALU32(MI) + || QII->isConditionalLoad(MI) + || QII->isConditionalStore(MI)) { + return true; + } + return false; +} + + +// Promote an instructiont to its .new form. +// At this time, we have already made a call to CanPromoteToDotNew +// and made sure that it can *indeed* be promoted. +bool HexagonPacketizerList::PromoteToDotNew(MachineInstr* MI, + SDep::Kind DepType, MachineBasicBlock::iterator &MII, + const TargetRegisterClass* RC) { + + assert (DepType == SDep::Data); + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + + int NewOpcode; + if (RC == &Hexagon::PredRegsRegClass) + NewOpcode = GetDotNewPredOp(MI->getOpcode()); + else + NewOpcode = GetDotNewOp(MI->getOpcode()); + MI->setDesc(QII->get(NewOpcode)); + + return true; +} + +// Returns the most basic instruction for the .new predicated instructions and +// new-value stores. +// For example, all of the following instructions will be converted back to the +// same instruction: +// 1) if (p0.new) memw(R0+#0) = R1.new ---> +// 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1 +// 3) if (p0.new) memw(R0+#0) = R1 ---> +// +// To understand the translation of instruction 1 to its original form, consider +// a packet with 3 instructions. +// { p0 = cmp.eq(R0,R1) +// if (p0.new) R2 = add(R3, R4) +// R5 = add (R3, R1) +// } +// if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet +// +// This instruction can be part of the previous packet only if both p0 and R2 +// are promoted to .new values. This promotion happens in steps, first +// predicate register is promoted to .new and in the next iteration R2 is +// promoted. Therefore, in case of dependence check failure (due to R5) during +// next iteration, it should be converted back to its most basic form. 
+ +static int GetDotOldOp(const int opc) { + switch (opc) { + default: llvm_unreachable("Unknown .old type"); + case Hexagon::TFR_cdnPt: + return Hexagon::TFR_cPt; + + case Hexagon::TFR_cdnNotPt: + return Hexagon::TFR_cNotPt; + + case Hexagon::TFRI_cdnPt: + return Hexagon::TFRI_cPt; + + case Hexagon::TFRI_cdnNotPt: + return Hexagon::TFRI_cNotPt; + + case Hexagon::JMP_cdnPt: + return Hexagon::JMP_c; + + case Hexagon::JMP_cdnNotPt: + return Hexagon::JMP_cNot; + + case Hexagon::JMPR_cdnPt_V3: + return Hexagon::JMPR_cPt; + + case Hexagon::JMPR_cdnNotPt_V3: + return Hexagon::JMPR_cNotPt; + + // Load double word + + case Hexagon::LDrid_cdnPt : + return Hexagon::LDrid_cPt; + + case Hexagon::LDrid_cdnNotPt : + return Hexagon::LDrid_cNotPt; + + case Hexagon::LDrid_indexed_cdnPt : + return Hexagon::LDrid_indexed_cPt; + + case Hexagon::LDrid_indexed_cdnNotPt : + return Hexagon::LDrid_indexed_cNotPt; + + case Hexagon::POST_LDrid_cdnPt_V4 : + return Hexagon::POST_LDrid_cPt; + + case Hexagon::POST_LDrid_cdnNotPt_V4 : + return Hexagon::POST_LDrid_cNotPt; + + // Load word + + case Hexagon::LDriw_cdnPt : + return Hexagon::LDriw_cPt; + + case Hexagon::LDriw_cdnNotPt : + return Hexagon::LDriw_cNotPt; + + case Hexagon::LDriw_indexed_cdnPt : + return Hexagon::LDriw_indexed_cPt; + + case Hexagon::LDriw_indexed_cdnNotPt : + return Hexagon::LDriw_indexed_cNotPt; + + case Hexagon::POST_LDriw_cdnPt_V4 : + return Hexagon::POST_LDriw_cPt; + + case Hexagon::POST_LDriw_cdnNotPt_V4 : + return Hexagon::POST_LDriw_cNotPt; + + // Load half + + case Hexagon::LDrih_cdnPt : + return Hexagon::LDrih_cPt; + + case Hexagon::LDrih_cdnNotPt : + return Hexagon::LDrih_cNotPt; + + case Hexagon::LDrih_indexed_cdnPt : + return Hexagon::LDrih_indexed_cPt; + + case Hexagon::LDrih_indexed_cdnNotPt : + return Hexagon::LDrih_indexed_cNotPt; + + case Hexagon::POST_LDrih_cdnPt_V4 : + return Hexagon::POST_LDrih_cPt; + + case Hexagon::POST_LDrih_cdnNotPt_V4 : + return Hexagon::POST_LDrih_cNotPt; + + // Load byte + + case 
Hexagon::LDrib_cdnPt : + return Hexagon::LDrib_cPt; + + case Hexagon::LDrib_cdnNotPt : + return Hexagon::LDrib_cNotPt; + + case Hexagon::LDrib_indexed_cdnPt : + return Hexagon::LDrib_indexed_cPt; + + case Hexagon::LDrib_indexed_cdnNotPt : + return Hexagon::LDrib_indexed_cNotPt; + + case Hexagon::POST_LDrib_cdnPt_V4 : + return Hexagon::POST_LDrib_cPt; + + case Hexagon::POST_LDrib_cdnNotPt_V4 : + return Hexagon::POST_LDrib_cNotPt; + + // Load unsigned half + + case Hexagon::LDriuh_cdnPt : + return Hexagon::LDriuh_cPt; + + case Hexagon::LDriuh_cdnNotPt : + return Hexagon::LDriuh_cNotPt; + + case Hexagon::LDriuh_indexed_cdnPt : + return Hexagon::LDriuh_indexed_cPt; + + case Hexagon::LDriuh_indexed_cdnNotPt : + return Hexagon::LDriuh_indexed_cNotPt; + + case Hexagon::POST_LDriuh_cdnPt_V4 : + return Hexagon::POST_LDriuh_cPt; + + case Hexagon::POST_LDriuh_cdnNotPt_V4 : + return Hexagon::POST_LDriuh_cNotPt; + + // Load unsigned byte + case Hexagon::LDriub_cdnPt : + return Hexagon::LDriub_cPt; + + case Hexagon::LDriub_cdnNotPt : + return Hexagon::LDriub_cNotPt; + + case Hexagon::LDriub_indexed_cdnPt : + return Hexagon::LDriub_indexed_cPt; + + case Hexagon::LDriub_indexed_cdnNotPt : + return Hexagon::LDriub_indexed_cNotPt; + + case Hexagon::POST_LDriub_cdnPt_V4 : + return Hexagon::POST_LDriub_cPt; + + case Hexagon::POST_LDriub_cdnNotPt_V4 : + return Hexagon::POST_LDriub_cNotPt; + + // V4 indexed+scaled Load + + case Hexagon::LDrid_indexed_cdnPt_V4 : + return Hexagon::LDrid_indexed_cPt_V4; + + case Hexagon::LDrid_indexed_cdnNotPt_V4 : + return Hexagon::LDrid_indexed_cNotPt_V4; + + case Hexagon::LDrid_indexed_shl_cdnPt_V4 : + return Hexagon::LDrid_indexed_shl_cPt_V4; + + case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 : + return Hexagon::LDrid_indexed_shl_cNotPt_V4; + + case Hexagon::LDrib_indexed_cdnPt_V4 : + return Hexagon::LDrib_indexed_cPt_V4; + + case Hexagon::LDrib_indexed_cdnNotPt_V4 : + return Hexagon::LDrib_indexed_cNotPt_V4; + + case Hexagon::LDrib_indexed_shl_cdnPt_V4 : 
+ return Hexagon::LDrib_indexed_shl_cPt_V4; + + case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 : + return Hexagon::LDrib_indexed_shl_cNotPt_V4; + + case Hexagon::LDriub_indexed_cdnPt_V4 : + return Hexagon::LDriub_indexed_cPt_V4; + + case Hexagon::LDriub_indexed_cdnNotPt_V4 : + return Hexagon::LDriub_indexed_cNotPt_V4; + + case Hexagon::LDriub_indexed_shl_cdnPt_V4 : + return Hexagon::LDriub_indexed_shl_cPt_V4; + + case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 : + return Hexagon::LDriub_indexed_shl_cNotPt_V4; + + case Hexagon::LDrih_indexed_cdnPt_V4 : + return Hexagon::LDrih_indexed_cPt_V4; + + case Hexagon::LDrih_indexed_cdnNotPt_V4 : + return Hexagon::LDrih_indexed_cNotPt_V4; + + case Hexagon::LDrih_indexed_shl_cdnPt_V4 : + return Hexagon::LDrih_indexed_shl_cPt_V4; + + case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 : + return Hexagon::LDrih_indexed_shl_cNotPt_V4; + + case Hexagon::LDriuh_indexed_cdnPt_V4 : + return Hexagon::LDriuh_indexed_cPt_V4; + + case Hexagon::LDriuh_indexed_cdnNotPt_V4 : + return Hexagon::LDriuh_indexed_cNotPt_V4; + + case Hexagon::LDriuh_indexed_shl_cdnPt_V4 : + return Hexagon::LDriuh_indexed_shl_cPt_V4; + + case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 : + return Hexagon::LDriuh_indexed_shl_cNotPt_V4; + + case Hexagon::LDriw_indexed_cdnPt_V4 : + return Hexagon::LDriw_indexed_cPt_V4; + + case Hexagon::LDriw_indexed_cdnNotPt_V4 : + return Hexagon::LDriw_indexed_cNotPt_V4; + + case Hexagon::LDriw_indexed_shl_cdnPt_V4 : + return Hexagon::LDriw_indexed_shl_cPt_V4; + + case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 : + return Hexagon::LDriw_indexed_shl_cNotPt_V4; + + // V4 global address load + + case Hexagon::LDd_GP_cdnPt_V4: + return Hexagon::LDd_GP_cPt_V4; + + case Hexagon::LDd_GP_cdnNotPt_V4: + return Hexagon::LDd_GP_cNotPt_V4; + + case Hexagon::LDb_GP_cdnPt_V4: + return Hexagon::LDb_GP_cPt_V4; + + case Hexagon::LDb_GP_cdnNotPt_V4: + return Hexagon::LDb_GP_cNotPt_V4; + + case Hexagon::LDub_GP_cdnPt_V4: + return Hexagon::LDub_GP_cPt_V4; + + case 
Hexagon::LDub_GP_cdnNotPt_V4: + return Hexagon::LDub_GP_cNotPt_V4; + + case Hexagon::LDh_GP_cdnPt_V4: + return Hexagon::LDh_GP_cPt_V4; + + case Hexagon::LDh_GP_cdnNotPt_V4: + return Hexagon::LDh_GP_cNotPt_V4; + + case Hexagon::LDuh_GP_cdnPt_V4: + return Hexagon::LDuh_GP_cPt_V4; + + case Hexagon::LDuh_GP_cdnNotPt_V4: + return Hexagon::LDuh_GP_cNotPt_V4; + + case Hexagon::LDw_GP_cdnPt_V4: + return Hexagon::LDw_GP_cPt_V4; + + case Hexagon::LDw_GP_cdnNotPt_V4: + return Hexagon::LDw_GP_cNotPt_V4; + + case Hexagon::LDrid_GP_cdnPt_V4: + return Hexagon::LDrid_GP_cPt_V4; + + case Hexagon::LDrid_GP_cdnNotPt_V4: + return Hexagon::LDrid_GP_cNotPt_V4; + + case Hexagon::LDrib_GP_cdnPt_V4: + return Hexagon::LDrib_GP_cPt_V4; + + case Hexagon::LDrib_GP_cdnNotPt_V4: + return Hexagon::LDrib_GP_cNotPt_V4; + + case Hexagon::LDriub_GP_cdnPt_V4: + return Hexagon::LDriub_GP_cPt_V4; + + case Hexagon::LDriub_GP_cdnNotPt_V4: + return Hexagon::LDriub_GP_cNotPt_V4; + + case Hexagon::LDrih_GP_cdnPt_V4: + return Hexagon::LDrih_GP_cPt_V4; + + case Hexagon::LDrih_GP_cdnNotPt_V4: + return Hexagon::LDrih_GP_cNotPt_V4; + + case Hexagon::LDriuh_GP_cdnPt_V4: + return Hexagon::LDriuh_GP_cPt_V4; + + case Hexagon::LDriuh_GP_cdnNotPt_V4: + return Hexagon::LDriuh_GP_cNotPt_V4; + + case Hexagon::LDriw_GP_cdnPt_V4: + return Hexagon::LDriw_GP_cPt_V4; + + case Hexagon::LDriw_GP_cdnNotPt_V4: + return Hexagon::LDriw_GP_cNotPt_V4; + + // Conditional add + + case Hexagon::ADD_ri_cdnPt : + return Hexagon::ADD_ri_cPt; + case Hexagon::ADD_ri_cdnNotPt : + return Hexagon::ADD_ri_cNotPt; + + case Hexagon::ADD_rr_cdnPt : + return Hexagon::ADD_rr_cPt; + case Hexagon::ADD_rr_cdnNotPt: + return Hexagon::ADD_rr_cNotPt; + + // Conditional logical Operations + + case Hexagon::XOR_rr_cdnPt : + return Hexagon::XOR_rr_cPt; + case Hexagon::XOR_rr_cdnNotPt : + return Hexagon::XOR_rr_cNotPt; + + case Hexagon::AND_rr_cdnPt : + return Hexagon::AND_rr_cPt; + case Hexagon::AND_rr_cdnNotPt : + return Hexagon::AND_rr_cNotPt; + + case 
Hexagon::OR_rr_cdnPt : + return Hexagon::OR_rr_cPt; + case Hexagon::OR_rr_cdnNotPt : + return Hexagon::OR_rr_cNotPt; + + // Conditional Subtract + + case Hexagon::SUB_rr_cdnPt : + return Hexagon::SUB_rr_cPt; + case Hexagon::SUB_rr_cdnNotPt : + return Hexagon::SUB_rr_cNotPt; + + // Conditional combine + + case Hexagon::COMBINE_rr_cdnPt : + return Hexagon::COMBINE_rr_cPt; + case Hexagon::COMBINE_rr_cdnNotPt : + return Hexagon::COMBINE_rr_cNotPt; + +// Conditional shift operations + + case Hexagon::ASLH_cdnPt_V4 : + return Hexagon::ASLH_cPt_V4; + case Hexagon::ASLH_cdnNotPt_V4 : + return Hexagon::ASLH_cNotPt_V4; + + case Hexagon::ASRH_cdnPt_V4 : + return Hexagon::ASRH_cPt_V4; + case Hexagon::ASRH_cdnNotPt_V4 : + return Hexagon::ASRH_cNotPt_V4; + + case Hexagon::SXTB_cdnPt_V4 : + return Hexagon::SXTB_cPt_V4; + case Hexagon::SXTB_cdnNotPt_V4 : + return Hexagon::SXTB_cNotPt_V4; + + case Hexagon::SXTH_cdnPt_V4 : + return Hexagon::SXTH_cPt_V4; + case Hexagon::SXTH_cdnNotPt_V4 : + return Hexagon::SXTH_cNotPt_V4; + + case Hexagon::ZXTB_cdnPt_V4 : + return Hexagon::ZXTB_cPt_V4; + case Hexagon::ZXTB_cdnNotPt_V4 : + return Hexagon::ZXTB_cNotPt_V4; + + case Hexagon::ZXTH_cdnPt_V4 : + return Hexagon::ZXTH_cPt_V4; + case Hexagon::ZXTH_cdnNotPt_V4 : + return Hexagon::ZXTH_cNotPt_V4; + + // Store byte + + case Hexagon::STrib_imm_cdnPt_V4 : + return Hexagon::STrib_imm_cPt_V4; + + case Hexagon::STrib_imm_cdnNotPt_V4 : + return Hexagon::STrib_imm_cNotPt_V4; + + case Hexagon::STrib_cdnPt_nv_V4 : + case Hexagon::STrib_cPt_nv_V4 : + case Hexagon::STrib_cdnPt_V4 : + return Hexagon::STrib_cPt; + + case Hexagon::STrib_cdnNotPt_nv_V4 : + case Hexagon::STrib_cNotPt_nv_V4 : + case Hexagon::STrib_cdnNotPt_V4 : + return Hexagon::STrib_cNotPt; + + case Hexagon::STrib_indexed_cdnPt_V4 : + case Hexagon::STrib_indexed_cPt_nv_V4 : + case Hexagon::STrib_indexed_cdnPt_nv_V4 : + return Hexagon::STrib_indexed_cPt; + + case Hexagon::STrib_indexed_cdnNotPt_V4 : + case Hexagon::STrib_indexed_cNotPt_nv_V4 : + 
case Hexagon::STrib_indexed_cdnNotPt_nv_V4 : + return Hexagon::STrib_indexed_cNotPt; + + case Hexagon::STrib_indexed_shl_cdnPt_nv_V4: + case Hexagon::STrib_indexed_shl_cPt_nv_V4 : + case Hexagon::STrib_indexed_shl_cdnPt_V4 : + return Hexagon::STrib_indexed_shl_cPt_V4; + + case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4: + case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 : + case Hexagon::STrib_indexed_shl_cdnNotPt_V4 : + return Hexagon::STrib_indexed_shl_cNotPt_V4; + + case Hexagon::POST_STbri_cdnPt_nv_V4 : + case Hexagon::POST_STbri_cPt_nv_V4 : + case Hexagon::POST_STbri_cdnPt_V4 : + return Hexagon::POST_STbri_cPt; + + case Hexagon::POST_STbri_cdnNotPt_nv_V4 : + case Hexagon::POST_STbri_cNotPt_nv_V4: + case Hexagon::POST_STbri_cdnNotPt_V4 : + return Hexagon::POST_STbri_cNotPt; + + case Hexagon::STb_GP_cdnPt_nv_V4: + case Hexagon::STb_GP_cdnPt_V4: + case Hexagon::STb_GP_cPt_nv_V4: + return Hexagon::STb_GP_cPt_V4; + + case Hexagon::STb_GP_cdnNotPt_nv_V4: + case Hexagon::STb_GP_cdnNotPt_V4: + case Hexagon::STb_GP_cNotPt_nv_V4: + return Hexagon::STb_GP_cNotPt_V4; + + case Hexagon::STrib_GP_cdnPt_nv_V4: + case Hexagon::STrib_GP_cdnPt_V4: + case Hexagon::STrib_GP_cPt_nv_V4: + return Hexagon::STrib_GP_cPt_V4; + + case Hexagon::STrib_GP_cdnNotPt_nv_V4: + case Hexagon::STrib_GP_cdnNotPt_V4: + case Hexagon::STrib_GP_cNotPt_nv_V4: + return Hexagon::STrib_GP_cNotPt_V4; + + // Store new-value byte - unconditional + case Hexagon::STrib_nv_V4: + return Hexagon::STrib; + + case Hexagon::STrib_indexed_nv_V4: + return Hexagon::STrib_indexed; + + case Hexagon::STrib_indexed_shl_nv_V4: + return Hexagon::STrib_indexed_shl_V4; + + case Hexagon::STrib_shl_nv_V4: + return Hexagon::STrib_shl_V4; + + case Hexagon::STrib_GP_nv_V4: + return Hexagon::STrib_GP_V4; + + case Hexagon::STb_GP_nv_V4: + return Hexagon::STb_GP_V4; + + case Hexagon::POST_STbri_nv_V4: + return Hexagon::POST_STbri; + + // Store halfword + case Hexagon::STrih_imm_cdnPt_V4 : + return Hexagon::STrih_imm_cPt_V4; + + case 
Hexagon::STrih_imm_cdnNotPt_V4 : + return Hexagon::STrih_imm_cNotPt_V4; + + case Hexagon::STrih_cdnPt_nv_V4 : + case Hexagon::STrih_cPt_nv_V4 : + case Hexagon::STrih_cdnPt_V4 : + return Hexagon::STrih_cPt; + + case Hexagon::STrih_cdnNotPt_nv_V4 : + case Hexagon::STrih_cNotPt_nv_V4 : + case Hexagon::STrih_cdnNotPt_V4 : + return Hexagon::STrih_cNotPt; + + case Hexagon::STrih_indexed_cdnPt_nv_V4: + case Hexagon::STrih_indexed_cPt_nv_V4 : + case Hexagon::STrih_indexed_cdnPt_V4 : + return Hexagon::STrih_indexed_cPt; + + case Hexagon::STrih_indexed_cdnNotPt_nv_V4: + case Hexagon::STrih_indexed_cNotPt_nv_V4 : + case Hexagon::STrih_indexed_cdnNotPt_V4 : + return Hexagon::STrih_indexed_cNotPt; + + case Hexagon::STrih_indexed_shl_cdnPt_nv_V4 : + case Hexagon::STrih_indexed_shl_cPt_nv_V4 : + case Hexagon::STrih_indexed_shl_cdnPt_V4 : + return Hexagon::STrih_indexed_shl_cPt_V4; + + case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4 : + case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 : + case Hexagon::STrih_indexed_shl_cdnNotPt_V4 : + return Hexagon::STrih_indexed_shl_cNotPt_V4; + + case Hexagon::POST_SThri_cdnPt_nv_V4 : + case Hexagon::POST_SThri_cPt_nv_V4 : + case Hexagon::POST_SThri_cdnPt_V4 : + return Hexagon::POST_SThri_cPt; + + case Hexagon::POST_SThri_cdnNotPt_nv_V4 : + case Hexagon::POST_SThri_cNotPt_nv_V4 : + case Hexagon::POST_SThri_cdnNotPt_V4 : + return Hexagon::POST_SThri_cNotPt; + + case Hexagon::STh_GP_cdnPt_nv_V4: + case Hexagon::STh_GP_cdnPt_V4: + case Hexagon::STh_GP_cPt_nv_V4: + return Hexagon::STh_GP_cPt_V4; + + case Hexagon::STh_GP_cdnNotPt_nv_V4: + case Hexagon::STh_GP_cdnNotPt_V4: + case Hexagon::STh_GP_cNotPt_nv_V4: + return Hexagon::STh_GP_cNotPt_V4; + + case Hexagon::STrih_GP_cdnPt_nv_V4: + case Hexagon::STrih_GP_cdnPt_V4: + case Hexagon::STrih_GP_cPt_nv_V4: + return Hexagon::STrih_GP_cPt_V4; + + case Hexagon::STrih_GP_cdnNotPt_nv_V4: + case Hexagon::STrih_GP_cdnNotPt_V4: + case Hexagon::STrih_GP_cNotPt_nv_V4: + return Hexagon::STrih_GP_cNotPt_V4; + + // 
Store new-value halfword - unconditional + + case Hexagon::STrih_nv_V4: + return Hexagon::STrih; + + case Hexagon::STrih_indexed_nv_V4: + return Hexagon::STrih_indexed; + + case Hexagon::STrih_indexed_shl_nv_V4: + return Hexagon::STrih_indexed_shl_V4; + + case Hexagon::STrih_shl_nv_V4: + return Hexagon::STrih_shl_V4; + + case Hexagon::STrih_GP_nv_V4: + return Hexagon::STrih_GP_V4; + + case Hexagon::STh_GP_nv_V4: + return Hexagon::STh_GP_V4; + + case Hexagon::POST_SThri_nv_V4: + return Hexagon::POST_SThri; + + // Store word + + case Hexagon::STriw_imm_cdnPt_V4 : + return Hexagon::STriw_imm_cPt_V4; + + case Hexagon::STriw_imm_cdnNotPt_V4 : + return Hexagon::STriw_imm_cNotPt_V4; + + case Hexagon::STriw_cdnPt_nv_V4 : + case Hexagon::STriw_cPt_nv_V4 : + case Hexagon::STriw_cdnPt_V4 : + return Hexagon::STriw_cPt; + + case Hexagon::STriw_cdnNotPt_nv_V4 : + case Hexagon::STriw_cNotPt_nv_V4 : + case Hexagon::STriw_cdnNotPt_V4 : + return Hexagon::STriw_cNotPt; + + case Hexagon::STriw_indexed_cdnPt_nv_V4 : + case Hexagon::STriw_indexed_cPt_nv_V4 : + case Hexagon::STriw_indexed_cdnPt_V4 : + return Hexagon::STriw_indexed_cPt; + + case Hexagon::STriw_indexed_cdnNotPt_nv_V4 : + case Hexagon::STriw_indexed_cNotPt_nv_V4 : + case Hexagon::STriw_indexed_cdnNotPt_V4 : + return Hexagon::STriw_indexed_cNotPt; + + case Hexagon::STriw_indexed_shl_cdnPt_nv_V4 : + case Hexagon::STriw_indexed_shl_cPt_nv_V4 : + case Hexagon::STriw_indexed_shl_cdnPt_V4 : + return Hexagon::STriw_indexed_shl_cPt_V4; + + case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4 : + case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 : + case Hexagon::STriw_indexed_shl_cdnNotPt_V4 : + return Hexagon::STriw_indexed_shl_cNotPt_V4; + + case Hexagon::POST_STwri_cdnPt_nv_V4 : + case Hexagon::POST_STwri_cPt_nv_V4 : + case Hexagon::POST_STwri_cdnPt_V4 : + return Hexagon::POST_STwri_cPt; + + case Hexagon::POST_STwri_cdnNotPt_nv_V4 : + case Hexagon::POST_STwri_cNotPt_nv_V4 : + case Hexagon::POST_STwri_cdnNotPt_V4 : + return 
Hexagon::POST_STwri_cNotPt; + + case Hexagon::STw_GP_cdnPt_nv_V4: + case Hexagon::STw_GP_cdnPt_V4: + case Hexagon::STw_GP_cPt_nv_V4: + return Hexagon::STw_GP_cPt_V4; + + case Hexagon::STw_GP_cdnNotPt_nv_V4: + case Hexagon::STw_GP_cdnNotPt_V4: + case Hexagon::STw_GP_cNotPt_nv_V4: + return Hexagon::STw_GP_cNotPt_V4; + + case Hexagon::STriw_GP_cdnPt_nv_V4: + case Hexagon::STriw_GP_cdnPt_V4: + case Hexagon::STriw_GP_cPt_nv_V4: + return Hexagon::STriw_GP_cPt_V4; + + case Hexagon::STriw_GP_cdnNotPt_nv_V4: + case Hexagon::STriw_GP_cdnNotPt_V4: + case Hexagon::STriw_GP_cNotPt_nv_V4: + return Hexagon::STriw_GP_cNotPt_V4; + + // Store new-value word - unconditional + + case Hexagon::STriw_nv_V4: + return Hexagon::STriw; + + case Hexagon::STriw_indexed_nv_V4: + return Hexagon::STriw_indexed; + + case Hexagon::STriw_indexed_shl_nv_V4: + return Hexagon::STriw_indexed_shl_V4; + + case Hexagon::STriw_shl_nv_V4: + return Hexagon::STriw_shl_V4; + + case Hexagon::STriw_GP_nv_V4: + return Hexagon::STriw_GP_V4; + + case Hexagon::STw_GP_nv_V4: + return Hexagon::STw_GP_V4; + + case Hexagon::POST_STwri_nv_V4: + return Hexagon::POST_STwri; + + // Store doubleword + + case Hexagon::STrid_cdnPt_V4 : + return Hexagon::STrid_cPt; + + case Hexagon::STrid_cdnNotPt_V4 : + return Hexagon::STrid_cNotPt; + + case Hexagon::STrid_indexed_cdnPt_V4 : + return Hexagon::STrid_indexed_cPt; + + case Hexagon::STrid_indexed_cdnNotPt_V4 : + return Hexagon::STrid_indexed_cNotPt; + + case Hexagon::STrid_indexed_shl_cdnPt_V4 : + return Hexagon::STrid_indexed_shl_cPt_V4; + + case Hexagon::STrid_indexed_shl_cdnNotPt_V4 : + return Hexagon::STrid_indexed_shl_cNotPt_V4; + + case Hexagon::POST_STdri_cdnPt_V4 : + return Hexagon::POST_STdri_cPt; + + case Hexagon::POST_STdri_cdnNotPt_V4 : + return Hexagon::POST_STdri_cNotPt; + + case Hexagon::STd_GP_cdnPt_V4 : + return Hexagon::STd_GP_cPt_V4; + + case Hexagon::STd_GP_cdnNotPt_V4 : + return Hexagon::STd_GP_cNotPt_V4; + + case Hexagon::STrid_GP_cdnPt_V4 : + return 
Hexagon::STrid_GP_cPt_V4; + + case Hexagon::STrid_GP_cdnNotPt_V4 : + return Hexagon::STrid_GP_cNotPt_V4; + } +} + +bool HexagonPacketizerList::DemoteToDotOld(MachineInstr* MI) { + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + int NewOpcode = GetDotOldOp(MI->getOpcode()); + MI->setDesc(QII->get(NewOpcode)); + return true; +} + +// Returns true if an instruction is predicated on p0 and false if it's +// predicated on !p0. + +static bool GetPredicateSense(MachineInstr* MI, + const HexagonInstrInfo *QII) { + + switch (MI->getOpcode()) { + default: llvm_unreachable("Unknown predicate sense of the instruction"); + case Hexagon::TFR_cPt: + case Hexagon::TFR_cdnPt: + case Hexagon::TFRI_cPt: + case Hexagon::TFRI_cdnPt: + case Hexagon::STrib_cPt : + case Hexagon::STrib_cdnPt_V4 : + case Hexagon::STrib_indexed_cPt : + case Hexagon::STrib_indexed_cdnPt_V4 : + case Hexagon::STrib_indexed_shl_cPt_V4 : + case Hexagon::STrib_indexed_shl_cdnPt_V4 : + case Hexagon::POST_STbri_cPt : + case Hexagon::POST_STbri_cdnPt_V4 : + case Hexagon::STrih_cPt : + case Hexagon::STrih_cdnPt_V4 : + case Hexagon::STrih_indexed_cPt : + case Hexagon::STrih_indexed_cdnPt_V4 : + case Hexagon::STrih_indexed_shl_cPt_V4 : + case Hexagon::STrih_indexed_shl_cdnPt_V4 : + case Hexagon::POST_SThri_cPt : + case Hexagon::POST_SThri_cdnPt_V4 : + case Hexagon::STriw_cPt : + case Hexagon::STriw_cdnPt_V4 : + case Hexagon::STriw_indexed_cPt : + case Hexagon::STriw_indexed_cdnPt_V4 : + case Hexagon::STriw_indexed_shl_cPt_V4 : + case Hexagon::STriw_indexed_shl_cdnPt_V4 : + case Hexagon::POST_STwri_cPt : + case Hexagon::POST_STwri_cdnPt_V4 : + case Hexagon::STrib_imm_cPt_V4 : + case Hexagon::STrib_imm_cdnPt_V4 : + case Hexagon::STrid_cPt : + case Hexagon::STrid_cdnPt_V4 : + case Hexagon::STrid_indexed_cPt : + case Hexagon::STrid_indexed_cdnPt_V4 : + case Hexagon::STrid_indexed_shl_cPt_V4 : + case Hexagon::STrid_indexed_shl_cdnPt_V4 : + case Hexagon::POST_STdri_cPt : + case Hexagon::POST_STdri_cdnPt_V4 : + 
case Hexagon::STrih_imm_cPt_V4 : + case Hexagon::STrih_imm_cdnPt_V4 : + case Hexagon::STriw_imm_cPt_V4 : + case Hexagon::STriw_imm_cdnPt_V4 : + case Hexagon::JMP_cdnPt : + case Hexagon::LDrid_cPt : + case Hexagon::LDrid_cdnPt : + case Hexagon::LDrid_indexed_cPt : + case Hexagon::LDrid_indexed_cdnPt : + case Hexagon::POST_LDrid_cPt : + case Hexagon::POST_LDrid_cdnPt_V4 : + case Hexagon::LDriw_cPt : + case Hexagon::LDriw_cdnPt : + case Hexagon::LDriw_indexed_cPt : + case Hexagon::LDriw_indexed_cdnPt : + case Hexagon::POST_LDriw_cPt : + case Hexagon::POST_LDriw_cdnPt_V4 : + case Hexagon::LDrih_cPt : + case Hexagon::LDrih_cdnPt : + case Hexagon::LDrih_indexed_cPt : + case Hexagon::LDrih_indexed_cdnPt : + case Hexagon::POST_LDrih_cPt : + case Hexagon::POST_LDrih_cdnPt_V4 : + case Hexagon::LDrib_cPt : + case Hexagon::LDrib_cdnPt : + case Hexagon::LDrib_indexed_cPt : + case Hexagon::LDrib_indexed_cdnPt : + case Hexagon::POST_LDrib_cPt : + case Hexagon::POST_LDrib_cdnPt_V4 : + case Hexagon::LDriuh_cPt : + case Hexagon::LDriuh_cdnPt : + case Hexagon::LDriuh_indexed_cPt : + case Hexagon::LDriuh_indexed_cdnPt : + case Hexagon::POST_LDriuh_cPt : + case Hexagon::POST_LDriuh_cdnPt_V4 : + case Hexagon::LDriub_cPt : + case Hexagon::LDriub_cdnPt : + case Hexagon::LDriub_indexed_cPt : + case Hexagon::LDriub_indexed_cdnPt : + case Hexagon::POST_LDriub_cPt : + case Hexagon::POST_LDriub_cdnPt_V4 : + case Hexagon::LDrid_indexed_cPt_V4 : + case Hexagon::LDrid_indexed_cdnPt_V4 : + case Hexagon::LDrid_indexed_shl_cPt_V4 : + case Hexagon::LDrid_indexed_shl_cdnPt_V4 : + case Hexagon::LDrib_indexed_cPt_V4 : + case Hexagon::LDrib_indexed_cdnPt_V4 : + case Hexagon::LDrib_indexed_shl_cPt_V4 : + case Hexagon::LDrib_indexed_shl_cdnPt_V4 : + case Hexagon::LDriub_indexed_cPt_V4 : + case Hexagon::LDriub_indexed_cdnPt_V4 : + case Hexagon::LDriub_indexed_shl_cPt_V4 : + case Hexagon::LDriub_indexed_shl_cdnPt_V4 : + case Hexagon::LDrih_indexed_cPt_V4 : + case Hexagon::LDrih_indexed_cdnPt_V4 : + case 
Hexagon::LDrih_indexed_shl_cPt_V4 : + case Hexagon::LDrih_indexed_shl_cdnPt_V4 : + case Hexagon::LDriuh_indexed_cPt_V4 : + case Hexagon::LDriuh_indexed_cdnPt_V4 : + case Hexagon::LDriuh_indexed_shl_cPt_V4 : + case Hexagon::LDriuh_indexed_shl_cdnPt_V4 : + case Hexagon::LDriw_indexed_cPt_V4 : + case Hexagon::LDriw_indexed_cdnPt_V4 : + case Hexagon::LDriw_indexed_shl_cPt_V4 : + case Hexagon::LDriw_indexed_shl_cdnPt_V4 : + case Hexagon::ADD_ri_cPt : + case Hexagon::ADD_ri_cdnPt : + case Hexagon::ADD_rr_cPt : + case Hexagon::ADD_rr_cdnPt : + case Hexagon::XOR_rr_cPt : + case Hexagon::XOR_rr_cdnPt : + case Hexagon::AND_rr_cPt : + case Hexagon::AND_rr_cdnPt : + case Hexagon::OR_rr_cPt : + case Hexagon::OR_rr_cdnPt : + case Hexagon::SUB_rr_cPt : + case Hexagon::SUB_rr_cdnPt : + case Hexagon::COMBINE_rr_cPt : + case Hexagon::COMBINE_rr_cdnPt : + case Hexagon::ASLH_cPt_V4 : + case Hexagon::ASLH_cdnPt_V4 : + case Hexagon::ASRH_cPt_V4 : + case Hexagon::ASRH_cdnPt_V4 : + case Hexagon::SXTB_cPt_V4 : + case Hexagon::SXTB_cdnPt_V4 : + case Hexagon::SXTH_cPt_V4 : + case Hexagon::SXTH_cdnPt_V4 : + case Hexagon::ZXTB_cPt_V4 : + case Hexagon::ZXTB_cdnPt_V4 : + case Hexagon::ZXTH_cPt_V4 : + case Hexagon::ZXTH_cdnPt_V4 : + case Hexagon::LDrid_GP_cPt_V4 : + case Hexagon::LDrib_GP_cPt_V4 : + case Hexagon::LDriub_GP_cPt_V4 : + case Hexagon::LDrih_GP_cPt_V4 : + case Hexagon::LDriuh_GP_cPt_V4 : + case Hexagon::LDriw_GP_cPt_V4 : + case Hexagon::LDd_GP_cPt_V4 : + case Hexagon::LDb_GP_cPt_V4 : + case Hexagon::LDub_GP_cPt_V4 : + case Hexagon::LDh_GP_cPt_V4 : + case Hexagon::LDuh_GP_cPt_V4 : + case Hexagon::LDw_GP_cPt_V4 : + case Hexagon::STrid_GP_cPt_V4 : + case Hexagon::STrib_GP_cPt_V4 : + case Hexagon::STrih_GP_cPt_V4 : + case Hexagon::STriw_GP_cPt_V4 : + case Hexagon::STd_GP_cPt_V4 : + case Hexagon::STb_GP_cPt_V4 : + case Hexagon::STh_GP_cPt_V4 : + case Hexagon::STw_GP_cPt_V4 : + case Hexagon::LDrid_GP_cdnPt_V4 : + case Hexagon::LDrib_GP_cdnPt_V4 : + case Hexagon::LDriub_GP_cdnPt_V4 : + case 
Hexagon::LDrih_GP_cdnPt_V4 : + case Hexagon::LDriuh_GP_cdnPt_V4 : + case Hexagon::LDriw_GP_cdnPt_V4 : + case Hexagon::LDd_GP_cdnPt_V4 : + case Hexagon::LDb_GP_cdnPt_V4 : + case Hexagon::LDub_GP_cdnPt_V4 : + case Hexagon::LDh_GP_cdnPt_V4 : + case Hexagon::LDuh_GP_cdnPt_V4 : + case Hexagon::LDw_GP_cdnPt_V4 : + case Hexagon::STrid_GP_cdnPt_V4 : + case Hexagon::STrib_GP_cdnPt_V4 : + case Hexagon::STrih_GP_cdnPt_V4 : + case Hexagon::STriw_GP_cdnPt_V4 : + case Hexagon::STd_GP_cdnPt_V4 : + case Hexagon::STb_GP_cdnPt_V4 : + case Hexagon::STh_GP_cdnPt_V4 : + case Hexagon::STw_GP_cdnPt_V4 : + return true; + + case Hexagon::TFR_cNotPt: + case Hexagon::TFR_cdnNotPt: + case Hexagon::TFRI_cNotPt: + case Hexagon::TFRI_cdnNotPt: + case Hexagon::STrib_cNotPt : + case Hexagon::STrib_cdnNotPt_V4 : + case Hexagon::STrib_indexed_cNotPt : + case Hexagon::STrib_indexed_cdnNotPt_V4 : + case Hexagon::STrib_indexed_shl_cNotPt_V4 : + case Hexagon::STrib_indexed_shl_cdnNotPt_V4 : + case Hexagon::POST_STbri_cNotPt : + case Hexagon::POST_STbri_cdnNotPt_V4 : + case Hexagon::STrih_cNotPt : + case Hexagon::STrih_cdnNotPt_V4 : + case Hexagon::STrih_indexed_cNotPt : + case Hexagon::STrih_indexed_cdnNotPt_V4 : + case Hexagon::STrih_indexed_shl_cNotPt_V4 : + case Hexagon::STrih_indexed_shl_cdnNotPt_V4 : + case Hexagon::POST_SThri_cNotPt : + case Hexagon::POST_SThri_cdnNotPt_V4 : + case Hexagon::STriw_cNotPt : + case Hexagon::STriw_cdnNotPt_V4 : + case Hexagon::STriw_indexed_cNotPt : + case Hexagon::STriw_indexed_cdnNotPt_V4 : + case Hexagon::STriw_indexed_shl_cNotPt_V4 : + case Hexagon::STriw_indexed_shl_cdnNotPt_V4 : + case Hexagon::POST_STwri_cNotPt : + case Hexagon::POST_STwri_cdnNotPt_V4 : + case Hexagon::STrib_imm_cNotPt_V4 : + case Hexagon::STrib_imm_cdnNotPt_V4 : + case Hexagon::STrid_cNotPt : + case Hexagon::STrid_cdnNotPt_V4 : + case Hexagon::STrid_indexed_cdnNotPt_V4 : + case Hexagon::STrid_indexed_cNotPt : + case Hexagon::STrid_indexed_shl_cNotPt_V4 : + case 
Hexagon::STrid_indexed_shl_cdnNotPt_V4 : + case Hexagon::POST_STdri_cNotPt : + case Hexagon::POST_STdri_cdnNotPt_V4 : + case Hexagon::STrih_imm_cNotPt_V4 : + case Hexagon::STrih_imm_cdnNotPt_V4 : + case Hexagon::STriw_imm_cNotPt_V4 : + case Hexagon::STriw_imm_cdnNotPt_V4 : + case Hexagon::JMP_cdnNotPt : + case Hexagon::LDrid_cNotPt : + case Hexagon::LDrid_cdnNotPt : + case Hexagon::LDrid_indexed_cNotPt : + case Hexagon::LDrid_indexed_cdnNotPt : + case Hexagon::POST_LDrid_cNotPt : + case Hexagon::POST_LDrid_cdnNotPt_V4 : + case Hexagon::LDriw_cNotPt : + case Hexagon::LDriw_cdnNotPt : + case Hexagon::LDriw_indexed_cNotPt : + case Hexagon::LDriw_indexed_cdnNotPt : + case Hexagon::POST_LDriw_cNotPt : + case Hexagon::POST_LDriw_cdnNotPt_V4 : + case Hexagon::LDrih_cNotPt : + case Hexagon::LDrih_cdnNotPt : + case Hexagon::LDrih_indexed_cNotPt : + case Hexagon::LDrih_indexed_cdnNotPt : + case Hexagon::POST_LDrih_cNotPt : + case Hexagon::POST_LDrih_cdnNotPt_V4 : + case Hexagon::LDrib_cNotPt : + case Hexagon::LDrib_cdnNotPt : + case Hexagon::LDrib_indexed_cNotPt : + case Hexagon::LDrib_indexed_cdnNotPt : + case Hexagon::POST_LDrib_cNotPt : + case Hexagon::POST_LDrib_cdnNotPt_V4 : + case Hexagon::LDriuh_cNotPt : + case Hexagon::LDriuh_cdnNotPt : + case Hexagon::LDriuh_indexed_cNotPt : + case Hexagon::LDriuh_indexed_cdnNotPt : + case Hexagon::POST_LDriuh_cNotPt : + case Hexagon::POST_LDriuh_cdnNotPt_V4 : + case Hexagon::LDriub_cNotPt : + case Hexagon::LDriub_cdnNotPt : + case Hexagon::LDriub_indexed_cNotPt : + case Hexagon::LDriub_indexed_cdnNotPt : + case Hexagon::POST_LDriub_cNotPt : + case Hexagon::POST_LDriub_cdnNotPt_V4 : + case Hexagon::LDrid_indexed_cNotPt_V4 : + case Hexagon::LDrid_indexed_cdnNotPt_V4 : + case Hexagon::LDrid_indexed_shl_cNotPt_V4 : + case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDrib_indexed_cNotPt_V4 : + case Hexagon::LDrib_indexed_cdnNotPt_V4 : + case Hexagon::LDrib_indexed_shl_cNotPt_V4 : + case 
Hexagon::LDrib_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDriub_indexed_cNotPt_V4 : + case Hexagon::LDriub_indexed_cdnNotPt_V4 : + case Hexagon::LDriub_indexed_shl_cNotPt_V4 : + case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDrih_indexed_cNotPt_V4 : + case Hexagon::LDrih_indexed_cdnNotPt_V4 : + case Hexagon::LDrih_indexed_shl_cNotPt_V4 : + case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDriuh_indexed_cNotPt_V4 : + case Hexagon::LDriuh_indexed_cdnNotPt_V4 : + case Hexagon::LDriuh_indexed_shl_cNotPt_V4 : + case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDriw_indexed_cNotPt_V4 : + case Hexagon::LDriw_indexed_cdnNotPt_V4 : + case Hexagon::LDriw_indexed_shl_cNotPt_V4 : + case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 : + case Hexagon::ADD_ri_cNotPt : + case Hexagon::ADD_ri_cdnNotPt : + case Hexagon::ADD_rr_cNotPt : + case Hexagon::ADD_rr_cdnNotPt : + case Hexagon::XOR_rr_cNotPt : + case Hexagon::XOR_rr_cdnNotPt : + case Hexagon::AND_rr_cNotPt : + case Hexagon::AND_rr_cdnNotPt : + case Hexagon::OR_rr_cNotPt : + case Hexagon::OR_rr_cdnNotPt : + case Hexagon::SUB_rr_cNotPt : + case Hexagon::SUB_rr_cdnNotPt : + case Hexagon::COMBINE_rr_cNotPt : + case Hexagon::COMBINE_rr_cdnNotPt : + case Hexagon::ASLH_cNotPt_V4 : + case Hexagon::ASLH_cdnNotPt_V4 : + case Hexagon::ASRH_cNotPt_V4 : + case Hexagon::ASRH_cdnNotPt_V4 : + case Hexagon::SXTB_cNotPt_V4 : + case Hexagon::SXTB_cdnNotPt_V4 : + case Hexagon::SXTH_cNotPt_V4 : + case Hexagon::SXTH_cdnNotPt_V4 : + case Hexagon::ZXTB_cNotPt_V4 : + case Hexagon::ZXTB_cdnNotPt_V4 : + case Hexagon::ZXTH_cNotPt_V4 : + case Hexagon::ZXTH_cdnNotPt_V4 : + + case Hexagon::LDrid_GP_cNotPt_V4 : + case Hexagon::LDrib_GP_cNotPt_V4 : + case Hexagon::LDriub_GP_cNotPt_V4 : + case Hexagon::LDrih_GP_cNotPt_V4 : + case Hexagon::LDriuh_GP_cNotPt_V4 : + case Hexagon::LDriw_GP_cNotPt_V4 : + case Hexagon::LDd_GP_cNotPt_V4 : + case Hexagon::LDb_GP_cNotPt_V4 : + case Hexagon::LDub_GP_cNotPt_V4 : + case 
Hexagon::LDh_GP_cNotPt_V4 : + case Hexagon::LDuh_GP_cNotPt_V4 : + case Hexagon::LDw_GP_cNotPt_V4 : + case Hexagon::STrid_GP_cNotPt_V4 : + case Hexagon::STrib_GP_cNotPt_V4 : + case Hexagon::STrih_GP_cNotPt_V4 : + case Hexagon::STriw_GP_cNotPt_V4 : + case Hexagon::STd_GP_cNotPt_V4 : + case Hexagon::STb_GP_cNotPt_V4 : + case Hexagon::STh_GP_cNotPt_V4 : + case Hexagon::STw_GP_cNotPt_V4 : + case Hexagon::LDrid_GP_cdnNotPt_V4 : + case Hexagon::LDrib_GP_cdnNotPt_V4 : + case Hexagon::LDriub_GP_cdnNotPt_V4 : + case Hexagon::LDrih_GP_cdnNotPt_V4 : + case Hexagon::LDriuh_GP_cdnNotPt_V4 : + case Hexagon::LDriw_GP_cdnNotPt_V4 : + case Hexagon::LDd_GP_cdnNotPt_V4 : + case Hexagon::LDb_GP_cdnNotPt_V4 : + case Hexagon::LDub_GP_cdnNotPt_V4 : + case Hexagon::LDh_GP_cdnNotPt_V4 : + case Hexagon::LDuh_GP_cdnNotPt_V4 : + case Hexagon::LDw_GP_cdnNotPt_V4 : + case Hexagon::STrid_GP_cdnNotPt_V4 : + case Hexagon::STrib_GP_cdnNotPt_V4 : + case Hexagon::STrih_GP_cdnNotPt_V4 : + case Hexagon::STriw_GP_cdnNotPt_V4 : + case Hexagon::STd_GP_cdnNotPt_V4 : + case Hexagon::STb_GP_cdnNotPt_V4 : + case Hexagon::STh_GP_cdnNotPt_V4 : + case Hexagon::STw_GP_cdnNotPt_V4 : + return false; + } + // return *some value* to avoid compiler warning + return false; +} + +bool HexagonPacketizerList::isDotNewInst(MachineInstr* MI) { + if (isNewValueInst(MI)) + return true; + + switch (MI->getOpcode()) { + case Hexagon::TFR_cdnNotPt: + case Hexagon::TFR_cdnPt: + case Hexagon::TFRI_cdnNotPt: + case Hexagon::TFRI_cdnPt: + case Hexagon::LDrid_cdnPt : + case Hexagon::LDrid_cdnNotPt : + case Hexagon::LDrid_indexed_cdnPt : + case Hexagon::LDrid_indexed_cdnNotPt : + case Hexagon::POST_LDrid_cdnPt_V4 : + case Hexagon::POST_LDrid_cdnNotPt_V4 : + case Hexagon::LDriw_cdnPt : + case Hexagon::LDriw_cdnNotPt : + case Hexagon::LDriw_indexed_cdnPt : + case Hexagon::LDriw_indexed_cdnNotPt : + case Hexagon::POST_LDriw_cdnPt_V4 : + case Hexagon::POST_LDriw_cdnNotPt_V4 : + case Hexagon::LDrih_cdnPt : + case Hexagon::LDrih_cdnNotPt : + 
case Hexagon::LDrih_indexed_cdnPt : + case Hexagon::LDrih_indexed_cdnNotPt : + case Hexagon::POST_LDrih_cdnPt_V4 : + case Hexagon::POST_LDrih_cdnNotPt_V4 : + case Hexagon::LDrib_cdnPt : + case Hexagon::LDrib_cdnNotPt : + case Hexagon::LDrib_indexed_cdnPt : + case Hexagon::LDrib_indexed_cdnNotPt : + case Hexagon::POST_LDrib_cdnPt_V4 : + case Hexagon::POST_LDrib_cdnNotPt_V4 : + case Hexagon::LDriuh_cdnPt : + case Hexagon::LDriuh_cdnNotPt : + case Hexagon::LDriuh_indexed_cdnPt : + case Hexagon::LDriuh_indexed_cdnNotPt : + case Hexagon::POST_LDriuh_cdnPt_V4 : + case Hexagon::POST_LDriuh_cdnNotPt_V4 : + case Hexagon::LDriub_cdnPt : + case Hexagon::LDriub_cdnNotPt : + case Hexagon::LDriub_indexed_cdnPt : + case Hexagon::LDriub_indexed_cdnNotPt : + case Hexagon::POST_LDriub_cdnPt_V4 : + case Hexagon::POST_LDriub_cdnNotPt_V4 : + + case Hexagon::LDrid_indexed_cdnPt_V4 : + case Hexagon::LDrid_indexed_cdnNotPt_V4 : + case Hexagon::LDrid_indexed_shl_cdnPt_V4 : + case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDrib_indexed_cdnPt_V4 : + case Hexagon::LDrib_indexed_cdnNotPt_V4 : + case Hexagon::LDrib_indexed_shl_cdnPt_V4 : + case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDriub_indexed_cdnPt_V4 : + case Hexagon::LDriub_indexed_cdnNotPt_V4 : + case Hexagon::LDriub_indexed_shl_cdnPt_V4 : + case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDrih_indexed_cdnPt_V4 : + case Hexagon::LDrih_indexed_cdnNotPt_V4 : + case Hexagon::LDrih_indexed_shl_cdnPt_V4 : + case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDriuh_indexed_cdnPt_V4 : + case Hexagon::LDriuh_indexed_cdnNotPt_V4 : + case Hexagon::LDriuh_indexed_shl_cdnPt_V4 : + case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 : + case Hexagon::LDriw_indexed_cdnPt_V4 : + case Hexagon::LDriw_indexed_cdnNotPt_V4 : + case Hexagon::LDriw_indexed_shl_cdnPt_V4 : + case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 : + +// Coditional add + case Hexagon::ADD_ri_cdnPt: + case Hexagon::ADD_ri_cdnNotPt: + case 
Hexagon::ADD_rr_cdnPt: + case Hexagon::ADD_rr_cdnNotPt: + + // Conditional logical operations + case Hexagon::XOR_rr_cdnPt : + case Hexagon::XOR_rr_cdnNotPt : + case Hexagon::AND_rr_cdnPt : + case Hexagon::AND_rr_cdnNotPt : + case Hexagon::OR_rr_cdnPt : + case Hexagon::OR_rr_cdnNotPt : + + // Conditonal subtract + case Hexagon::SUB_rr_cdnPt : + case Hexagon::SUB_rr_cdnNotPt : + + // Conditional combine + case Hexagon::COMBINE_rr_cdnPt : + case Hexagon::COMBINE_rr_cdnNotPt : + + // Conditional shift operations + case Hexagon::ASLH_cdnPt_V4: + case Hexagon::ASLH_cdnNotPt_V4: + case Hexagon::ASRH_cdnPt_V4: + case Hexagon::ASRH_cdnNotPt_V4: + case Hexagon::SXTB_cdnPt_V4: + case Hexagon::SXTB_cdnNotPt_V4: + case Hexagon::SXTH_cdnPt_V4: + case Hexagon::SXTH_cdnNotPt_V4: + case Hexagon::ZXTB_cdnPt_V4: + case Hexagon::ZXTB_cdnNotPt_V4: + case Hexagon::ZXTH_cdnPt_V4: + case Hexagon::ZXTH_cdnNotPt_V4: + + // Conditional stores + case Hexagon::STrib_imm_cdnPt_V4 : + case Hexagon::STrib_imm_cdnNotPt_V4 : + case Hexagon::STrib_cdnPt_V4 : + case Hexagon::STrib_cdnNotPt_V4 : + case Hexagon::STrib_indexed_cdnPt_V4 : + case Hexagon::STrib_indexed_cdnNotPt_V4 : + case Hexagon::POST_STbri_cdnPt_V4 : + case Hexagon::POST_STbri_cdnNotPt_V4 : + case Hexagon::STrib_indexed_shl_cdnPt_V4 : + case Hexagon::STrib_indexed_shl_cdnNotPt_V4 : + + // Store doubleword conditionally + case Hexagon::STrid_indexed_cdnPt_V4 : + case Hexagon::STrid_indexed_cdnNotPt_V4 : + case Hexagon::STrid_indexed_shl_cdnPt_V4 : + case Hexagon::STrid_indexed_shl_cdnNotPt_V4 : + case Hexagon::POST_STdri_cdnPt_V4 : + case Hexagon::POST_STdri_cdnNotPt_V4 : + + // Store halfword conditionally + case Hexagon::STrih_cdnPt_V4 : + case Hexagon::STrih_cdnNotPt_V4 : + case Hexagon::STrih_indexed_cdnPt_V4 : + case Hexagon::STrih_indexed_cdnNotPt_V4 : + case Hexagon::STrih_imm_cdnPt_V4 : + case Hexagon::STrih_imm_cdnNotPt_V4 : + case Hexagon::STrih_indexed_shl_cdnPt_V4 : + case Hexagon::STrih_indexed_shl_cdnNotPt_V4 : + case 
Hexagon::POST_SThri_cdnPt_V4 : + case Hexagon::POST_SThri_cdnNotPt_V4 : + + // Store word conditionally + case Hexagon::STriw_cdnPt_V4 : + case Hexagon::STriw_cdnNotPt_V4 : + case Hexagon::STriw_indexed_cdnPt_V4 : + case Hexagon::STriw_indexed_cdnNotPt_V4 : + case Hexagon::STriw_imm_cdnPt_V4 : + case Hexagon::STriw_imm_cdnNotPt_V4 : + case Hexagon::STriw_indexed_shl_cdnPt_V4 : + case Hexagon::STriw_indexed_shl_cdnNotPt_V4 : + case Hexagon::POST_STwri_cdnPt_V4 : + case Hexagon::POST_STwri_cdnNotPt_V4 : + + case Hexagon::LDd_GP_cdnPt_V4: + case Hexagon::LDd_GP_cdnNotPt_V4: + case Hexagon::LDb_GP_cdnPt_V4: + case Hexagon::LDb_GP_cdnNotPt_V4: + case Hexagon::LDub_GP_cdnPt_V4: + case Hexagon::LDub_GP_cdnNotPt_V4: + case Hexagon::LDh_GP_cdnPt_V4: + case Hexagon::LDh_GP_cdnNotPt_V4: + case Hexagon::LDuh_GP_cdnPt_V4: + case Hexagon::LDuh_GP_cdnNotPt_V4: + case Hexagon::LDw_GP_cdnPt_V4: + case Hexagon::LDw_GP_cdnNotPt_V4: + case Hexagon::LDrid_GP_cdnPt_V4: + case Hexagon::LDrid_GP_cdnNotPt_V4: + case Hexagon::LDrib_GP_cdnPt_V4: + case Hexagon::LDrib_GP_cdnNotPt_V4: + case Hexagon::LDriub_GP_cdnPt_V4: + case Hexagon::LDriub_GP_cdnNotPt_V4: + case Hexagon::LDrih_GP_cdnPt_V4: + case Hexagon::LDrih_GP_cdnNotPt_V4: + case Hexagon::LDriuh_GP_cdnPt_V4: + case Hexagon::LDriuh_GP_cdnNotPt_V4: + case Hexagon::LDriw_GP_cdnPt_V4: + case Hexagon::LDriw_GP_cdnNotPt_V4: + + case Hexagon::STrid_GP_cdnPt_V4: + case Hexagon::STrid_GP_cdnNotPt_V4: + case Hexagon::STrib_GP_cdnPt_V4: + case Hexagon::STrib_GP_cdnNotPt_V4: + case Hexagon::STrih_GP_cdnPt_V4: + case Hexagon::STrih_GP_cdnNotPt_V4: + case Hexagon::STriw_GP_cdnPt_V4: + case Hexagon::STriw_GP_cdnNotPt_V4: + case Hexagon::STd_GP_cdnPt_V4: + case Hexagon::STd_GP_cdnNotPt_V4: + case Hexagon::STb_GP_cdnPt_V4: + case Hexagon::STb_GP_cdnNotPt_V4: + case Hexagon::STh_GP_cdnPt_V4: + case Hexagon::STh_GP_cdnNotPt_V4: + case Hexagon::STw_GP_cdnPt_V4: + case Hexagon::STw_GP_cdnNotPt_V4: + return true; + } + return false; +} + +static 
MachineOperand& GetPostIncrementOperand(MachineInstr *MI, + const HexagonInstrInfo *QII) { + assert(QII->isPostIncrement(MI) && "Not a post increment operation."); +#ifndef NDEBUG + // Post Increment means duplicates. Use dense map to find duplicates in the + // list. Caution: Densemap initializes with the minimum of 64 buckets, + // whereas there are at most 5 operands in the post increment. + DenseMap<unsigned, unsigned> DefRegsSet; + for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) + if (MI->getOperand(opNum).isReg() && + MI->getOperand(opNum).isDef()) { + DefRegsSet[MI->getOperand(opNum).getReg()] = 1; + } + + for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) + if (MI->getOperand(opNum).isReg() && + MI->getOperand(opNum).isUse()) { + if (DefRegsSet[MI->getOperand(opNum).getReg()]) { + return MI->getOperand(opNum); + } + } +#else + if (MI->getDesc().mayLoad()) { + // The 2nd operand is always the post increment operand in load. + assert(MI->getOperand(1).isReg() && + "Post increment operand has be to a register."); + return (MI->getOperand(1)); + } + if (MI->getDesc().mayStore()) { + // The 1st operand is always the post increment operand in store. + assert(MI->getOperand(0).isReg() && + "Post increment operand has be to a register."); + return (MI->getOperand(0)); + } +#endif + // we should never come here. + llvm_unreachable("mayLoad or mayStore not set for Post Increment operation"); +} + +// get the value being stored +static MachineOperand& GetStoreValueOperand(MachineInstr *MI) { + // value being stored is always the last operand. + return (MI->getOperand(MI->getNumOperands()-1)); +} + +// can be new value store? +// Following restrictions are to be respected in convert a store into +// a new value store. +// 1. If an instruction uses auto-increment, its address register cannot +// be a new-value register. Arch Spec 5.4.2.1 +// 2. 
If an instruction uses absolute-set addressing mode, +// its address register cannot be a new-value register. +// Arch Spec 5.4.2.1.TODO: This is not enabled as +// as absolute-set address mode patters are not implemented. +// 3. If an instruction produces a 64-bit result, its registers cannot be used +// as new-value registers. Arch Spec 5.4.2.2. +// 4. If the instruction that sets a new-value register is conditional, then +// the instruction that uses the new-value register must also be conditional, +// and both must always have their predicates evaluate identically. +// Arch Spec 5.4.2.3. +// 5. There is an implied restriction of a packet can not have another store, +// if there is a new value store in the packet. Corollary, if there is +// already a store in a packet, there can not be a new value store. +// Arch Spec: 3.4.4.2 +bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI, + MachineInstr *PacketMI, unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit) +{ + // Make sure we are looking at the store + if (!IsNewifyStore(MI)) + return false; + + // Make sure there is dependency and can be new value'ed + if (GetStoreValueOperand(MI).isReg() && + GetStoreValueOperand(MI).getReg() != DepReg) + return false; + + const HexagonRegisterInfo* QRI = + (const HexagonRegisterInfo *) TM.getRegisterInfo(); + const MCInstrDesc& MCID = PacketMI->getDesc(); + // first operand is always the result + + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + const TargetRegisterClass* PacketRC = QII->getRegClass(MCID, 0, QRI, MF); + + // if there is already an store in the packet, no can do new value store + // Arch Spec 3.4.4.2. 
+ for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(), + VE = CurrentPacketMIs.end(); + (VI != VE); ++VI) { + SUnit* PacketSU = MIToSUnit[*VI]; + if (PacketSU->getInstr()->getDesc().mayStore() || + // if we have mayStore = 1 set on ALLOCFRAME and DEALLOCFRAME, + // then we don't need this + PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME || + PacketSU->getInstr()->getOpcode() == Hexagon::DEALLOCFRAME) + return false; + } + + if (PacketRC == &Hexagon::DoubleRegsRegClass) { + // new value store constraint: double regs can not feed into new value store + // arch spec section: 5.4.2.2 + return false; + } + + // Make sure it's NOT the post increment register that we are going to + // new value. + if (QII->isPostIncrement(MI) && + MI->getDesc().mayStore() && + GetPostIncrementOperand(MI, QII).getReg() == DepReg) { + return false; + } + + if (QII->isPostIncrement(PacketMI) && + PacketMI->getDesc().mayLoad() && + GetPostIncrementOperand(PacketMI, QII).getReg() == DepReg) { + // if source is post_inc, or absolute-set addressing, + // it can not feed into new value store + // r3 = memw(r2++#4) + // memw(r30 + #-1404) = r2.new -> can not be new value store + // arch spec section: 5.4.2.1 + return false; + } + + // If the source that feeds the store is predicated, new value store must + // also be also predicated. 
+ if (QII->isPredicated(PacketMI)) { + if (!QII->isPredicated(MI)) + return false; + + // Check to make sure that they both will have their predicates + // evaluate identically + unsigned predRegNumSrc = 0; + unsigned predRegNumDst = 0; + const TargetRegisterClass* predRegClass = NULL; + + // Get predicate register used in the source instruction + for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) { + if ( PacketMI->getOperand(opNum).isReg()) + predRegNumSrc = PacketMI->getOperand(opNum).getReg(); + predRegClass = QRI->getMinimalPhysRegClass(predRegNumSrc); + if (predRegClass == &Hexagon::PredRegsRegClass) { + break; + } + } + assert ((predRegClass == &Hexagon::PredRegsRegClass ) && + ("predicate register not found in a predicated PacketMI instruction")); + + // Get predicate register used in new-value store instruction + for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) { + if ( MI->getOperand(opNum).isReg()) + predRegNumDst = MI->getOperand(opNum).getReg(); + predRegClass = QRI->getMinimalPhysRegClass(predRegNumDst); + if (predRegClass == &Hexagon::PredRegsRegClass) { + break; + } + } + assert ((predRegClass == &Hexagon::PredRegsRegClass ) && + ("predicate register not found in a predicated MI instruction")); + + // New-value register producer and user (store) need to satisfy these + // constraints: + // 1) Both instructions should be predicated on the same register. + // 2) If producer of the new-value register is .new predicated then store + // should also be .new predicated and if producer is not .new predicated + // then store should not be .new predicated. + // 3) Both new-value register producer and user should have same predicate + // sense, i.e, either both should be negated or both should be none negated. 
+ + if (( predRegNumDst != predRegNumSrc) || + isDotNewInst(PacketMI) != isDotNewInst(MI) || + GetPredicateSense(MI, QII) != GetPredicateSense(PacketMI, QII)) { + return false; + } + } + + // Make sure that other than the new-value register no other store instruction + // register has been modified in the same packet. Predicate registers can be + // modified by they should not be modified between the producer and the store + // instruction as it will make them both conditional on different values. + // We already know this to be true for all the instructions before and + // including PacketMI. Howerver, we need to perform the check for the + // remaining instructions in the packet. + + std::vector<MachineInstr*>::iterator VI; + std::vector<MachineInstr*>::iterator VE; + unsigned StartCheck = 0; + + for (VI=CurrentPacketMIs.begin(), VE = CurrentPacketMIs.end(); + (VI != VE); ++VI) { + SUnit* TempSU = MIToSUnit[*VI]; + MachineInstr* TempMI = TempSU->getInstr(); + + // Following condition is true for all the instructions until PacketMI is + // reached (StartCheck is set to 0 before the for loop). + // StartCheck flag is 1 for all the instructions after PacketMI. + if (TempMI != PacketMI && !StartCheck) // start processing only after + continue; // encountering PacketMI + + StartCheck = 1; + if (TempMI == PacketMI) // We don't want to check PacketMI for dependence + continue; + + for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) { + if (MI->getOperand(opNum).isReg() && + TempSU->getInstr()->modifiesRegister(MI->getOperand(opNum).getReg(), + QRI)) + return false; + } + } + + // Make sure that for non POST_INC stores: + // 1. The only use of reg is DepReg and no other registers. + // This handles V4 base+index registers. + // The following store can not be dot new. + // Eg. 
r0 = add(r0, #3)a + // memw(r1+r0<<#2) = r0 + if (!QII->isPostIncrement(MI) && + GetStoreValueOperand(MI).isReg() && + GetStoreValueOperand(MI).getReg() == DepReg) { + for(unsigned opNum = 0; opNum < MI->getNumOperands()-1; opNum++) { + if (MI->getOperand(opNum).isReg() && + MI->getOperand(opNum).getReg() == DepReg) { + return false; + } + } + // 2. If data definition is because of implicit definition of the register, + // do not newify the store. Eg. + // %R9<def> = ZXTH %R12, %D6<imp-use>, %R12<imp-def> + // STrih_indexed %R8, 2, %R12<kill>; mem:ST2[%scevgep343] + for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) { + if (PacketMI->getOperand(opNum).isReg() && + PacketMI->getOperand(opNum).getReg() == DepReg && + PacketMI->getOperand(opNum).isDef() && + PacketMI->getOperand(opNum).isImplicit()) { + return false; + } + } + } + + // Can be dot new store. + return true; +} + +// can this MI to promoted to either +// new value store or new value jump +bool HexagonPacketizerList::CanPromoteToNewValue( MachineInstr *MI, + SUnit *PacketSU, unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit, + MachineBasicBlock::iterator &MII) +{ + + const HexagonRegisterInfo* QRI = + (const HexagonRegisterInfo *) TM.getRegisterInfo(); + if (!QRI->Subtarget.hasV4TOps() || + !IsNewifyStore(MI)) + return false; + + MachineInstr *PacketMI = PacketSU->getInstr(); + + // Check to see the store can be new value'ed. + if (CanPromoteToNewValueStore(MI, PacketMI, DepReg, MIToSUnit)) + return true; + + // Check to see the compare/jump can be new value'ed. + // This is done as a pass on its own. Don't need to check it here. + return false; +} + +// Check to see if an instruction can be dot new +// There are three kinds. +// 1. dot new on predicate - V2/V3/V4 +// 2. dot new on stores NV/ST - V4 +// 3. dot new on jump NV/J - V4 -- This is generated in a pass. 
+bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI, + SUnit *PacketSU, unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit, + MachineBasicBlock::iterator &MII, + const TargetRegisterClass* RC ) +{ + // already a dot new instruction + if (isDotNewInst(MI) && !IsNewifyStore(MI)) + return false; + + if (!isNewifiable(MI)) + return false; + + // predicate .new + if (RC == &Hexagon::PredRegsRegClass && isCondInst(MI)) + return true; + else if (RC != &Hexagon::PredRegsRegClass && + !IsNewifyStore(MI)) // MI is not a new-value store + return false; + else { + // Create a dot new machine instruction to see if resources can be + // allocated. If not, bail out now. + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + int NewOpcode = GetDotNewOp(MI->getOpcode()); + const MCInstrDesc &desc = QII->get(NewOpcode); + DebugLoc dl; + MachineInstr *NewMI = + MI->getParent()->getParent()->CreateMachineInstr(desc, dl); + bool ResourcesAvailable = ResourceTracker->canReserveResources(NewMI); + MI->getParent()->getParent()->DeleteMachineInstr(NewMI); + + if (!ResourcesAvailable) + return false; + + // new value store only + // new new value jump generated as a passes + if (!CanPromoteToNewValue(MI, PacketSU, DepReg, MIToSUnit, MII)) { + return false; + } + } + return true; +} + +// Go through the packet instructions and search for anti dependency +// between them and DepReg from MI +// Consider this case: +// Trying to add +// a) %R1<def> = TFRI_cdNotPt %P3, 2 +// to this packet: +// { +// b) %P0<def> = OR_pp %P3<kill>, %P0<kill> +// c) %P3<def> = TFR_PdRs %R23 +// d) %R1<def> = TFRI_cdnPt %P3, 4 +// } +// The P3 from a) and d) will be complements after +// a)'s P3 is converted to .new form +// Anti Dep between c) and b) is irrelevant for this case +bool HexagonPacketizerList::RestrictingDepExistInPacket (MachineInstr* MI, + unsigned DepReg, + std::map <MachineInstr*, SUnit*> MIToSUnit) { + + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) 
TII; + SUnit* PacketSUDep = MIToSUnit[MI]; + + for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(), + VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) { + + // We only care for dependencies to predicated instructions + if(!QII->isPredicated(*VIN)) continue; + + // Scheduling Unit for current insn in the packet + SUnit* PacketSU = MIToSUnit[*VIN]; + + // Look at dependencies between current members of the packet + // and predicate defining instruction MI. + // Make sure that dependency is on the exact register + // we care about. + if (PacketSU->isSucc(PacketSUDep)) { + for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) { + if ((PacketSU->Succs[i].getSUnit() == PacketSUDep) && + (PacketSU->Succs[i].getKind() == SDep::Anti) && + (PacketSU->Succs[i].getReg() == DepReg)) { + return true; + } + } + } + } + + return false; +} + + +// Given two predicated instructions, this function detects whether +// the predicates are complements +bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1, + MachineInstr* MI2, std::map <MachineInstr*, SUnit*> MIToSUnit) { + + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + // Currently can only reason about conditional transfers + if (!QII->isConditionalTransfer(MI1) || !QII->isConditionalTransfer(MI2)) { + return false; + } + + // Scheduling unit for candidate + SUnit* SU = MIToSUnit[MI1]; + + // One corner case deals with the following scenario: + // Trying to add + // a) %R24<def> = TFR_cPt %P0, %R25 + // to this packet: + // + // { + // b) %R25<def> = TFR_cNotPt %P0, %R24 + // c) %P0<def> = CMPEQri %R26, 1 + // } + // + // On general check a) and b) are complements, but + // presence of c) will convert a) to .new form, and + // then it is not a complement + // We attempt to detect it by analyzing existing + // dependencies in the packet + + // Analyze relationships between all existing members of the packet. 
+ // Look for Anti dependecy on the same predicate reg + // as used in the candidate + for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(), + VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) { + + // Scheduling Unit for current insn in the packet + SUnit* PacketSU = MIToSUnit[*VIN]; + + // If this instruction in the packet is succeeded by the candidate... + if (PacketSU->isSucc(SU)) { + for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) { + // The corner case exist when there is true data + // dependency between candidate and one of current + // packet members, this dep is on predicate reg, and + // there already exist anti dep on the same pred in + // the packet. + if (PacketSU->Succs[i].getSUnit() == SU && + Hexagon::PredRegsRegClass.contains( + PacketSU->Succs[i].getReg()) && + PacketSU->Succs[i].getKind() == SDep::Data && + // Here I know that *VIN is predicate setting instruction + // with true data dep to candidate on the register + // we care about - c) in the above example. + // Now I need to see if there is an anti dependency + // from c) to any other instruction in the + // same packet on the pred reg of interest + RestrictingDepExistInPacket(*VIN,PacketSU->Succs[i].getReg(), + MIToSUnit)) { + return false; + } + } + } + } + + // If the above case does not apply, check regular + // complement condition. + // Check that the predicate register is the same and + // that the predicate sense is different + // We also need to differentiate .old vs. 
.new: + // !p0 is not complimentary to p0.new + return ((MI1->getOperand(1).getReg() == MI2->getOperand(1).getReg()) && + (GetPredicateSense(MI1, QII) != GetPredicateSense(MI2, QII)) && + (isDotNewInst(MI1) == isDotNewInst(MI2))); +} + +// initPacketizerState - Initialize packetizer flags +void HexagonPacketizerList::initPacketizerState() { + + Dependence = false; + PromotedToDotNew = false; + GlueToNewValueJump = false; + GlueAllocframeStore = false; + FoundSequentialDependence = false; + + return; +} + +// ignorePseudoInstruction - Ignore bundling of pseudo instructions. +bool HexagonPacketizerList::ignorePseudoInstruction(MachineInstr *MI, + MachineBasicBlock *MBB) { + if (MI->isDebugValue()) + return true; + + // We must print out inline assembly + if (MI->isInlineAsm()) + return false; + + // We check if MI has any functional units mapped to it. + // If it doesn't, we ignore the instruction. + const MCInstrDesc& TID = MI->getDesc(); + unsigned SchedClass = TID.getSchedClass(); + const InstrStage* IS = + ResourceTracker->getInstrItins()->beginStage(SchedClass); + unsigned FuncUnits = IS->getUnits(); + return !FuncUnits; +} + +// isSoloInstruction: - Returns true for instructions that must be +// scheduled in their own packet. +bool HexagonPacketizerList::isSoloInstruction(MachineInstr *MI) { + + if (MI->isInlineAsm()) + return true; + + if (MI->isEHLabel()) + return true; + + // From Hexagon V4 Programmer's Reference Manual 3.4.4 Grouping constraints: + // trap, pause, barrier, icinva, isync, and syncht are solo instructions. + // They must not be grouped with other instructions in a packet. + if (IsSchedBarrier(MI)) + return true; + + return false; +} + +// isLegalToPacketizeTogether: +// SUI is the current instruction that is out side of the current packet. +// SUJ is the current instruction inside the current packet against which that +// SUI will be packetized. 
+bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { + MachineInstr *I = SUI->getInstr(); + MachineInstr *J = SUJ->getInstr(); + assert(I && J && "Unable to packetize null instruction!"); + + const MCInstrDesc &MCIDI = I->getDesc(); + const MCInstrDesc &MCIDJ = J->getDesc(); + + MachineBasicBlock::iterator II = I; + + const unsigned FrameSize = MF.getFrameInfo()->getStackSize(); + const HexagonRegisterInfo* QRI = + (const HexagonRegisterInfo *) TM.getRegisterInfo(); + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + + // Inline asm cannot go in the packet. + if (I->getOpcode() == Hexagon::INLINEASM) + llvm_unreachable("Should not meet inline asm here!"); + + if (isSoloInstruction(I)) + llvm_unreachable("Should not meet solo instr here!"); + + // A save callee-save register function call can only be in a packet + // with instructions that don't write to the callee-save registers. + if ((QII->isSaveCalleeSavedRegsCall(I) && + DoesModifyCalleeSavedReg(J, QRI)) || + (QII->isSaveCalleeSavedRegsCall(J) && + DoesModifyCalleeSavedReg(I, QRI))) { + Dependence = true; + return false; + } + + // Two control flow instructions cannot go in the same packet. + if (IsControlFlow(I) && IsControlFlow(J)) { + Dependence = true; + return false; + } + + // A LoopN instruction cannot appear in the same packet as a jump or call. + if (IsLoopN(I) && ( IsDirectJump(J) + || MCIDJ.isCall() + || QII->isDeallocRet(J))) { + Dependence = true; + return false; + } + if (IsLoopN(J) && ( IsDirectJump(I) + || MCIDI.isCall() + || QII->isDeallocRet(I))) { + Dependence = true; + return false; + } + + // dealloc_return cannot appear in the same packet as a conditional or + // unconditional jump. + if (QII->isDeallocRet(I) && ( MCIDJ.isBranch() + || MCIDJ.isCall() + || MCIDJ.isBarrier())) { + Dependence = true; + return false; + } + + + // V4 allows dual store. But does not allow second store, if the + // first store is not in SLOT0. 
New value store, new value jump, + // dealloc_return and memop always take SLOT0. + // Arch spec 3.4.4.2 + if (QRI->Subtarget.hasV4TOps()) { + + if (MCIDI.mayStore() && MCIDJ.mayStore() && isNewValueInst(J)) { + Dependence = true; + return false; + } + + if ( (QII->isMemOp(J) && MCIDI.mayStore()) + || (MCIDJ.mayStore() && QII->isMemOp(I)) + || (QII->isMemOp(J) && QII->isMemOp(I))) { + Dependence = true; + return false; + } + + //if dealloc_return + if (MCIDJ.mayStore() && QII->isDeallocRet(I)){ + Dependence = true; + return false; + } + + // If an instruction feeds new value jump, glue it. + MachineBasicBlock::iterator NextMII = I; + ++NextMII; + MachineInstr *NextMI = NextMII; + + if (QII->isNewValueJump(NextMI)) { + + bool secondRegMatch = false; + bool maintainNewValueJump = false; + + if (NextMI->getOperand(1).isReg() && + I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) { + secondRegMatch = true; + maintainNewValueJump = true; + } + + if (!secondRegMatch && + I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) { + maintainNewValueJump = true; + } + + for (std::vector<MachineInstr*>::iterator + VI = CurrentPacketMIs.begin(), + VE = CurrentPacketMIs.end(); + (VI != VE && maintainNewValueJump); ++VI) { + SUnit* PacketSU = MIToSUnit[*VI]; + + // NVJ can not be part of the dual jump - Arch Spec: section 7.8 + if (PacketSU->getInstr()->getDesc().isCall()) { + Dependence = true; + break; + } + // Validate + // 1. Packet does not have a store in it. + // 2. If the first operand of the nvj is newified, and the second + // operand is also a reg, it (second reg) is not defined in + // the same packet. + // 3. If the second operand of the nvj is newified, (which means + // first operand is also a reg), first reg is not defined in + // the same packet. + if (PacketSU->getInstr()->getDesc().mayStore() || + PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME || + // Check #2. 
+ (!secondRegMatch && NextMI->getOperand(1).isReg() && + PacketSU->getInstr()->modifiesRegister( + NextMI->getOperand(1).getReg(), QRI)) || + // Check #3. + (secondRegMatch && + PacketSU->getInstr()->modifiesRegister( + NextMI->getOperand(0).getReg(), QRI))) { + Dependence = true; + break; + } + } + if (!Dependence) + GlueToNewValueJump = true; + else + return false; + } + } + + if (SUJ->isSucc(SUI)) { + for (unsigned i = 0; + (i < SUJ->Succs.size()) && !FoundSequentialDependence; + ++i) { + + if (SUJ->Succs[i].getSUnit() != SUI) { + continue; + } + + SDep::Kind DepType = SUJ->Succs[i].getKind(); + + // For direct calls: + // Ignore register dependences for call instructions for + // packetization purposes except for those due to r31 and + // predicate registers. + // + // For indirect calls: + // Same as direct calls + check for true dependences to the register + // used in the indirect call. + // + // We completely ignore Order dependences for call instructions + // + // For returns: + // Ignore register dependences for return instructions like jumpr, + // dealloc return unless we have dependencies on the explicit uses + // of the registers used by jumpr (like r31) or dealloc return + // (like r29 or r30). + // + // TODO: Currently, jumpr is handling only return of r31. So, the + // following logic (specifically IsCallDependent) is working fine. + // We need to enable jumpr for register other than r31 and then, + // we need to rework the last part, where it handles indirect call + // of that (IsCallDependent) function. Bug 6216 is opened for this. + // + unsigned DepReg = 0; + const TargetRegisterClass* RC = NULL; + if (DepType == SDep::Data) { + DepReg = SUJ->Succs[i].getReg(); + RC = QRI->getMinimalPhysRegClass(DepReg); + } + if ((MCIDI.isCall() || MCIDI.isReturn()) && + (!IsRegDependence(DepType) || + !IsCallDependent(I, DepType, SUJ->Succs[i].getReg()))) { + /* do nothing */ + } + + // For instructions that can be promoted to dot-new, try to promote. 
+ else if ((DepType == SDep::Data) && + CanPromoteToDotNew(I, SUJ, DepReg, MIToSUnit, II, RC) && + PromoteToDotNew(I, DepType, II, RC)) { + PromotedToDotNew = true; + /* do nothing */ + } + + else if ((DepType == SDep::Data) && + (QII->isNewValueJump(I))) { + /* do nothing */ + } + + // For predicated instructions, if the predicates are complements + // then there can be no dependence. + else if (QII->isPredicated(I) && + QII->isPredicated(J) && + ArePredicatesComplements(I, J, MIToSUnit)) { + /* do nothing */ + + } + else if (IsDirectJump(I) && + !MCIDJ.isBranch() && + !MCIDJ.isCall() && + (DepType == SDep::Order)) { + // Ignore Order dependences between unconditional direct branches + // and non-control-flow instructions + /* do nothing */ + } + else if (MCIDI.isConditionalBranch() && (DepType != SDep::Data) && + (DepType != SDep::Output)) { + // Ignore all dependences for jumps except for true and output + // dependences + /* do nothing */ + } + + // Ignore output dependences due to superregs. We can + // write to two different subregisters of R1:0 for instance + // in the same cycle + // + + // + // Let the + // If neither I nor J defines DepReg, then this is a + // superfluous output dependence. The dependence must be of the + // form: + // R0 = ... + // R1 = ... + // and there is an output dependence between the two instructions + // with + // DepReg = D0 + // We want to ignore these dependences. + // Ideally, the dependence constructor should annotate such + // dependences. We can then avoid this relatively expensive check. + // + else if (DepType == SDep::Output) { + // DepReg is the register that's responsible for the dependence. + unsigned DepReg = SUJ->Succs[i].getReg(); + + // Check if I and J really defines DepReg. + if (I->definesRegister(DepReg) || + J->definesRegister(DepReg)) { + FoundSequentialDependence = true; + break; + } + } + + // We ignore Order dependences for + // 1. Two loads unless they are volatile. + // 2. 
Two stores in V4 unless they are volatile. + else if ((DepType == SDep::Order) && + !I->hasVolatileMemoryRef() && + !J->hasVolatileMemoryRef()) { + if (QRI->Subtarget.hasV4TOps() && + // hexagonv4 allows dual store. + MCIDI.mayStore() && MCIDJ.mayStore()) { + /* do nothing */ + } + // store followed by store-- not OK on V2 + // store followed by load -- not OK on all (OK if addresses + // are not aliased) + // load followed by store -- OK on all + // load followed by load -- OK on all + else if ( !MCIDJ.mayStore()) { + /* do nothing */ + } + else { + FoundSequentialDependence = true; + break; + } + } + + // For V4, special case ALLOCFRAME. Even though there is dependency + // between ALLOCFRAME and subsequent store, allow it to be + // packetized in the same packet. This implies that the store is using + // caller's SP. Hence, offset needs to be updated accordingly. + else if (DepType == SDep::Data + && QRI->Subtarget.hasV4TOps() + && J->getOpcode() == Hexagon::ALLOCFRAME + && (I->getOpcode() == Hexagon::STrid + || I->getOpcode() == Hexagon::STriw + || I->getOpcode() == Hexagon::STrib) + && I->getOperand(0).getReg() == QRI->getStackRegister() + && QII->isValidOffset(I->getOpcode(), + I->getOperand(1).getImm() - + (FrameSize + HEXAGON_LRFP_SIZE))) + { + GlueAllocframeStore = true; + // Since this store is to be glued with allocframe in the same + // packet, it will use SP of the previous stack frame, i.e. + // caller's SP. Therefore, we need to recalculate offset according + // to this change. + I->getOperand(1).setImm(I->getOperand(1).getImm() - + (FrameSize + HEXAGON_LRFP_SIZE)); + } + + // + // Skip over anti-dependences. 
Two instructions that are + // anti-dependent can share a packet + // + else if (DepType != SDep::Anti) { + FoundSequentialDependence = true; + break; + } + } + + if (FoundSequentialDependence) { + Dependence = true; + return false; + } + } + + return true; +} + +// isLegalToPruneDependencies +bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) { + MachineInstr *I = SUI->getInstr(); + assert(I && SUJ->getInstr() && "Unable to packetize null instruction!"); + + const unsigned FrameSize = MF.getFrameInfo()->getStackSize(); + + if (Dependence) { + + // Check if the instruction was promoted to a dot-new. If so, demote it + // back into a dot-old. + if (PromotedToDotNew) { + DemoteToDotOld(I); + } + + // Check if the instruction (must be a store) was glued with an Allocframe + // instruction. If so, restore its offset to its original value, i.e. use + // current SP instead of caller's SP. + if (GlueAllocframeStore) { + I->getOperand(1).setImm(I->getOperand(1).getImm() + + FrameSize + HEXAGON_LRFP_SIZE); + } + + return false; + } + return true; +} + +MachineBasicBlock::iterator +HexagonPacketizerList::addToPacket(MachineInstr *MI) { + + MachineBasicBlock::iterator MII = MI; + MachineBasicBlock *MBB = MI->getParent(); + + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; + + if (GlueToNewValueJump) { + + ++MII; + MachineInstr *nvjMI = MII; + assert(ResourceTracker->canReserveResources(MI)); + ResourceTracker->reserveResources(MI); + if (QII->isExtended(MI) && + !tryAllocateResourcesForConstExt(MI)) { + endPacket(MBB, MI); + ResourceTracker->reserveResources(MI); + assert(canReserveResourcesForConstExt(MI) && + "Ensure that there is a slot"); + reserveResourcesForConstExt(MI); + // Reserve resources for new value jump constant extender. 
+ assert(canReserveResourcesForConstExt(MI) && + "Ensure that there is a slot"); + reserveResourcesForConstExt(nvjMI); + assert(ResourceTracker->canReserveResources(nvjMI) && + "Ensure that there is a slot"); + + } else if ( // Extended instruction takes two slots in the packet. + // Try reserve and allocate 4-byte in the current packet first. + (QII->isExtended(nvjMI) + && (!tryAllocateResourcesForConstExt(nvjMI) + || !ResourceTracker->canReserveResources(nvjMI))) + || // For non-extended instruction, no need to allocate extra 4 bytes. + (!QII->isExtended(nvjMI) && + !ResourceTracker->canReserveResources(nvjMI))) + { + endPacket(MBB, MI); + // A new and empty packet starts. + // We are sure that the resources requirements can be satisfied. + // Therefore, do not need to call "canReserveResources" anymore. + ResourceTracker->reserveResources(MI); + if (QII->isExtended(nvjMI)) + reserveResourcesForConstExt(nvjMI); + } + // Here, we are sure that "reserveResources" would succeed. + ResourceTracker->reserveResources(nvjMI); + CurrentPacketMIs.push_back(MI); + CurrentPacketMIs.push_back(nvjMI); + } else { + if ( QII->isExtended(MI) + && ( !tryAllocateResourcesForConstExt(MI) + || !ResourceTracker->canReserveResources(MI))) + { + endPacket(MBB, MI); + // Check if the instruction was promoted to a dot-new. If so, demote it + // back into a dot-old + if (PromotedToDotNew) { + DemoteToDotOld(MI); + } + reserveResourcesForConstExt(MI); + } + // In case that "MI" is not an extended insn, + // the resource availability has already been checked. 
+ ResourceTracker->reserveResources(MI); + CurrentPacketMIs.push_back(MI); + } + return MII; +} + +//===----------------------------------------------------------------------===// +// Public Constructor Functions +//===----------------------------------------------------------------------===// + +FunctionPass *llvm::createHexagonPacketizer() { + return new HexagonPacketizer(); +} + diff --git a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp index 47384cd..035afe8 100644 --- a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp +++ b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp @@ -15,6 +15,7 @@ #include "Hexagon.h" #include "HexagonAsmPrinter.h" #include "HexagonInstPrinter.h" +#include "HexagonMCInst.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCExpr.h" @@ -37,20 +38,50 @@ StringRef HexagonInstPrinter::getRegName(unsigned RegNo) const { void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) { + printInst((const HexagonMCInst*)(MI), O, Annot); +} + +void HexagonInstPrinter::printInst(const HexagonMCInst *MI, raw_ostream &O, + StringRef Annot) { const char packetPadding[] = " "; const char startPacket = '{', endPacket = '}'; // TODO: add outer HW loop when it's supported too. if (MI->getOpcode() == Hexagon::ENDLOOP0) { - MCInst Nop; + // Ending a hardware loop is different from ending a regular packet. + assert(MI->isEndPacket() && "Loop end must also end the packet"); + + if (MI->isStartPacket()) { + // There must be a packet to end a loop. + // FIXME: when shuffling is always run, this shouldn't be needed. + HexagonMCInst Nop; + StringRef NoAnnot; + + Nop.setOpcode (Hexagon::NOP); + Nop.setStartPacket (MI->isStartPacket()); + printInst (&Nop, O, NoAnnot); + } + + // Close the packet. 
+ if (MI->isEndPacket()) + O << packetPadding << endPacket; - O << packetPadding << startPacket << '\n'; - Nop.setOpcode(Hexagon::NOP); - printInstruction(&Nop, O); - O << packetPadding << endPacket; + printInstruction(MI, O); + } + else { + // Prefix the insn opening the packet. + if (MI->isStartPacket()) + O << packetPadding << startPacket << '\n'; + + printInstruction(MI, O); + + // Suffix the insn closing the packet. + if (MI->isEndPacket()) + // Suffix the packet in a new line always, since the GNU assembler has + // issues with a closing brace on the same line as CONST{32,64}. + O << '\n' << packetPadding << endPacket; } - printInstruction(MI, O); printAnnotation(O, Annot); } @@ -65,22 +96,22 @@ void HexagonInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, } else if(MO.isImm()) { printImmOperand(MI, OpNo, O); } else { - assert(false && "Unknown operand"); + llvm_unreachable("Unknown operand"); } } -void HexagonInstPrinter::printImmOperand - (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { +void HexagonInstPrinter::printImmOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) const { O << MI->getOperand(OpNo).getImm(); } void HexagonInstPrinter::printExtOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) const { + raw_ostream &O) const { O << MI->getOperand(OpNo).getImm(); } -void HexagonInstPrinter::printUnsignedImmOperand - (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { +void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI, + unsigned OpNo, raw_ostream &O) const { O << MI->getOperand(OpNo).getImm(); } @@ -89,13 +120,13 @@ void HexagonInstPrinter::printNegImmOperand(const MCInst *MI, unsigned OpNo, O << -MI->getOperand(OpNo).getImm(); } -void HexagonInstPrinter::printNOneImmOperand - (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { +void HexagonInstPrinter::printNOneImmOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) const { O << -1; } -void HexagonInstPrinter::printMEMriOperand - (const 
MCInst *MI, unsigned OpNo, raw_ostream &O) const { +void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) const { const MCOperand& MO0 = MI->getOperand(OpNo); const MCOperand& MO1 = MI->getOperand(OpNo + 1); @@ -103,8 +134,8 @@ void HexagonInstPrinter::printMEMriOperand O << " + #" << MO1.getImm(); } -void HexagonInstPrinter::printFrameIndexOperand - (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { +void HexagonInstPrinter::printFrameIndexOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) const { const MCOperand& MO0 = MI->getOperand(OpNo); const MCOperand& MO1 = MI->getOperand(OpNo + 1); diff --git a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h index dad4334..902a323 100644 --- a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h +++ b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h @@ -14,6 +14,7 @@ #ifndef HEXAGONINSTPRINTER_H #define HEXAGONINSTPRINTER_H +#include "HexagonMCInst.h" #include "llvm/MC/MCInstPrinter.h" namespace llvm { @@ -25,6 +26,7 @@ namespace llvm { : MCInstPrinter(MAI, MII, MRI) {} virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot); + void printInst(const HexagonMCInst *MI, raw_ostream &O, StringRef Annot); virtual StringRef getOpcodeName(unsigned Opcode) const; void printInstruction(const MCInst *MI, raw_ostream &O); StringRef getRegName(unsigned RegNo) const; @@ -33,16 +35,16 @@ namespace llvm { void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; void printImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; void printExtOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; - void printUnsignedImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) - const; + void printUnsignedImmOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) const; void printNegImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; void 
printNOneImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; void printMEMriOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; - void printFrameIndexOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) - const; + void printFrameIndexOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) const; void printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; void printCallOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) @@ -55,7 +57,8 @@ namespace llvm { const; void printJumpTable(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; - void printConstantPool(const MCInst *MI, unsigned OpNo, raw_ostream &O) const; + void printConstantPool(const MCInst *MI, unsigned OpNo, + raw_ostream &O) const; void printSymbolHi(const MCInst *MI, unsigned OpNo, raw_ostream &O) const { printSymbol(MI, OpNo, O, true); } diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h index ed55c3c..7221e90 100644 --- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h +++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h @@ -23,14 +23,41 @@ namespace llvm { /// instruction info tracks. /// namespace HexagonII { - // *** The code below must match HexagonInstrFormat*.td *** // + // Insn types. + // *** Must match HexagonInstrFormat*.td *** + enum Type { + TypePSEUDO = 0, + TypeALU32 = 1, + TypeCR = 2, + TypeJR = 3, + TypeJ = 4, + TypeLD = 5, + TypeST = 6, + TypeSYSTEM = 7, + TypeXTYPE = 8, + TypeMEMOP = 9, + TypeNV = 10, + TypePREFIX = 30, // Such as extenders. + TypeMARKER = 31 // Such as end of a HW loop. + }; + + + // MCInstrDesc TSFlags + // *** Must match HexagonInstrFormat*.td *** enum { + // This 5-bit field describes the insn type. + TypePos = 0, + TypeMask = 0x1f, + + // Solo instructions. + SoloPos = 5, + SoloMask = 0x1, // Predicated instructions. - PredicatedPos = 1, + PredicatedPos = 6, PredicatedMask = 0x1 }; |