Diffstat (limited to 'contrib/llvm/lib/Target/SystemZ')
53 files changed, 7433 insertions, 729 deletions
diff --git a/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp index a94717c..3368078 100644 --- a/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp +++ b/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp @@ -9,15 +9,30 @@ #include "MCTargetDesc/SystemZMCTargetDesc.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstBuilder.h" +#include "llvm/MC/MCParser/MCAsmLexer.h" +#include "llvm/MC/MCParser/MCAsmParser.h" +#include "llvm/MC/MCParser/MCAsmParserExtension.h" #include "llvm/MC/MCParser/MCParsedAsmOperand.h" #include "llvm/MC/MCParser/MCTargetAsmParser.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SMLoc.h" #include "llvm/Support/TargetRegistry.h" +#include <algorithm> +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <iterator> +#include <memory> +#include <string> using namespace llvm; @@ -31,6 +46,7 @@ static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) { } namespace { + enum RegisterKind { GR32Reg, GRH32Reg, @@ -45,6 +61,7 @@ enum RegisterKind { VR64Reg, VR128Reg, AR32Reg, + CR64Reg, }; enum MemoryKind { @@ -56,7 +73,6 @@ enum MemoryKind { }; class SystemZOperand : public MCParsedAsmOperand { -public: private: enum OperandKind { KindInvalid, @@ -140,12 +156,14 @@ public: SMLoc EndLoc) { return make_unique<SystemZOperand>(KindInvalid, StartLoc, EndLoc); } + static std::unique_ptr<SystemZOperand> createToken(StringRef Str, SMLoc Loc) { auto Op = make_unique<SystemZOperand>(KindToken, Loc, Loc); Op->Token.Data = Str.data(); Op->Token.Length = Str.size(); return Op; } + static std::unique_ptr<SystemZOperand> createReg(RegisterKind Kind, unsigned Num, SMLoc StartLoc, SMLoc EndLoc) { auto Op = make_unique<SystemZOperand>(KindReg, StartLoc, EndLoc); @@ -153,12 +171,14 @@ public: Op->Reg.Num = Num; return Op; } + static std::unique_ptr<SystemZOperand> createImm(const MCExpr *Expr, SMLoc StartLoc, SMLoc EndLoc) { auto Op = make_unique<SystemZOperand>(KindImm, StartLoc, EndLoc); Op->Imm = Expr; return Op; } + static std::unique_ptr<SystemZOperand> createMem(MemoryKind MemKind, RegisterKind RegKind, unsigned Base, const MCExpr *Disp, unsigned Index, const MCExpr *LengthImm, @@ -175,6 +195,7 @@ public: Op->Mem.Length.Reg = LengthReg; return Op; } + static std::unique_ptr<SystemZOperand> createImmTLS(const MCExpr *Imm, const MCExpr *Sym, SMLoc StartLoc, SMLoc EndLoc) { @@ -242,6 +263,9 @@ public: bool isMemDisp20(MemoryKind MemKind, RegisterKind RegKind) const { return isMem(MemKind, RegKind) && inRange(Mem.Disp, -524288, 524287); } + bool isMemDisp12Len4(RegisterKind RegKind) const { + return isMemDisp12(BDLMem, RegKind) && inRange(Mem.Length.Imm, 1, 0x10); + } bool isMemDisp12Len8(RegisterKind RegKind) const { return isMemDisp12(BDLMem, RegKind) && inRange(Mem.Length.Imm, 1, 0x100); } @@ -251,6 +275,10 @@ public: SMLoc getEndLoc() const override { return EndLoc; } void print(raw_ostream &OS) const override; + /// getLocRange - Get the range between the first and last token of this + /// operand. + SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } + // Used by the TableGen code to add particular types of operand // to an instruction. 
void addRegOperands(MCInst &Inst, unsigned N) const { @@ -320,6 +348,7 @@ public: bool isVF128() const { return false; } bool isVR128() const { return isReg(VR128Reg); } bool isAR32() const { return isReg(AR32Reg); } + bool isCR64() const { return isReg(CR64Reg); } bool isAnyReg() const { return (isReg() || isImm(0, 15)); } bool isBDAddr32Disp12() const { return isMemDisp12(BDMem, ADDR32Reg); } bool isBDAddr32Disp20() const { return isMemDisp20(BDMem, ADDR32Reg); } @@ -327,6 +356,7 @@ public: bool isBDAddr64Disp20() const { return isMemDisp20(BDMem, ADDR64Reg); } bool isBDXAddr64Disp12() const { return isMemDisp12(BDXMem, ADDR64Reg); } bool isBDXAddr64Disp20() const { return isMemDisp20(BDXMem, ADDR64Reg); } + bool isBDLAddr64Disp12Len4() const { return isMemDisp12Len4(ADDR64Reg); } bool isBDLAddr64Disp12Len8() const { return isMemDisp12Len8(ADDR64Reg); } bool isBDRAddr64Disp12() const { return isMemDisp12(BDRMem, ADDR64Reg); } bool isBDVAddr64Disp12() const { return isMemDisp12(BDVMem, ADDR64Reg); } @@ -355,7 +385,8 @@ private: RegGR, RegFP, RegV, - RegAR + RegAR, + RegCR }; struct Register { RegisterGroup Group; @@ -463,6 +494,9 @@ public: OperandMatchResultTy parseAR32(OperandVector &Operands) { return parseRegister(Operands, RegAR, SystemZMC::AR32Regs, AR32Reg); } + OperandMatchResultTy parseCR64(OperandVector &Operands) { + return parseRegister(Operands, RegCR, SystemZMC::CR64Regs, CR64Reg); + } OperandMatchResultTy parseAnyReg(OperandVector &Operands) { return parseAnyRegister(Operands); } @@ -503,6 +537,7 @@ public: return parsePCRel(Operands, -(1LL << 32), (1LL << 32) - 1, true); } }; + } // end anonymous namespace #define GET_REGISTER_MATCHER @@ -623,6 +658,8 @@ bool SystemZAsmParser::parseRegister(Register &Reg) { Reg.Group = RegV; else if (Prefix == 'a' && Reg.Num < 16) Reg.Group = RegAR; + else if (Prefix == 'c' && Reg.Num < 16) + Reg.Group = RegCR; else return Error(Reg.StartLoc, "invalid register"); @@ -716,6 +753,10 @@ SystemZAsmParser::parseAnyRegister(OperandVector &Operands) { Kind = AR32Reg; RegNo = SystemZMC::AR32Regs[Reg.Num]; } + else if (Reg.Group == RegCR) { + Kind = CR64Reg; + RegNo = SystemZMC::CR64Regs[Reg.Num]; + } else { return MatchOperand_ParseFail; } @@ -1031,6 +1072,8 @@ bool SystemZAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, RegNo = SystemZMC::VR128Regs[Reg.Num]; else if (Reg.Group == RegAR) RegNo = SystemZMC::AR32Regs[Reg.Num]; + else if (Reg.Group == RegCR) + RegNo = SystemZMC::CR64Regs[Reg.Num]; StartLoc = Reg.StartLoc; EndLoc = Reg.EndLoc; return false; @@ -1125,6 +1168,8 @@ bool SystemZAsmParser::parseOperand(OperandVector &Operands, return false; } +std::string SystemZMnemonicSpellCheck(StringRef S, uint64_t FBS); + bool SystemZAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, @@ -1170,8 +1215,13 @@ bool SystemZAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return Error(ErrorLoc, "invalid operand for instruction"); } - case Match_MnemonicFail: - return Error(IDLoc, "invalid instruction"); + case Match_MnemonicFail: { + uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits()); + std::string Suggestion = SystemZMnemonicSpellCheck( + ((SystemZOperand &)*Operands[0]).getToken(), FBS); + return Error(IDLoc, "invalid instruction" + Suggestion, + ((SystemZOperand &)*Operands[0]).getLocRange()); + } } llvm_unreachable("Unexpected match type"); diff --git a/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp 
b/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp index 1806e01..8903b57 100644 --- a/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp +++ b/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp @@ -7,12 +7,16 @@ // //===----------------------------------------------------------------------===// +#include "MCTargetDesc/SystemZMCTargetDesc.h" #include "SystemZ.h" #include "llvm/MC/MCDisassembler/MCDisassembler.h" #include "llvm/MC/MCFixedLenDisassembler.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/MathExtras.h" #include "llvm/Support/TargetRegistry.h" +#include <cassert> +#include <cstdint> using namespace llvm; @@ -21,17 +25,19 @@ using namespace llvm; typedef MCDisassembler::DecodeStatus DecodeStatus; namespace { + class SystemZDisassembler : public MCDisassembler { public: SystemZDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) : MCDisassembler(STI, Ctx) {} - ~SystemZDisassembler() override {} + ~SystemZDisassembler() override = default; DecodeStatus getInstruction(MCInst &instr, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t Address, raw_ostream &VStream, raw_ostream &CStream) const override; }; + } // end anonymous namespace static MCDisassembler *createSystemZDisassembler(const Target &T, @@ -156,6 +162,12 @@ static DecodeStatus DecodeAR32BitRegisterClass(MCInst &Inst, uint64_t RegNo, return decodeRegisterClass(Inst, RegNo, SystemZMC::AR32Regs, 16); } +static DecodeStatus DecodeCR64BitRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + return decodeRegisterClass(Inst, RegNo, SystemZMC::CR64Regs, 16); +} + template<unsigned N> static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm) { if (!isUInt<N>(Imm)) @@ -321,6 +333,18 @@ static DecodeStatus decodeBDXAddr20Operand(MCInst &Inst, uint64_t Field, return MCDisassembler::Success; } +static DecodeStatus decodeBDLAddr12Len4Operand(MCInst &Inst, uint64_t Field, + const unsigned *Regs) { + uint64_t Length = Field >> 16; + uint64_t Base = (Field >> 12) & 0xf; + uint64_t Disp = Field & 0xfff; + assert(Length < 16 && "Invalid BDLAddr12Len4"); + Inst.addOperand(MCOperand::createReg(Base == 0 ? 
0 : Regs[Base])); + Inst.addOperand(MCOperand::createImm(Disp)); + Inst.addOperand(MCOperand::createImm(Length + 1)); + return MCDisassembler::Success; +} + static DecodeStatus decodeBDLAddr12Len8Operand(MCInst &Inst, uint64_t Field, const unsigned *Regs) { uint64_t Length = Field >> 16; @@ -393,6 +417,13 @@ static DecodeStatus decodeBDXAddr64Disp20Operand(MCInst &Inst, uint64_t Field, return decodeBDXAddr20Operand(Inst, Field, SystemZMC::GR64Regs); } +static DecodeStatus decodeBDLAddr64Disp12Len4Operand(MCInst &Inst, + uint64_t Field, + uint64_t Address, + const void *Decoder) { + return decodeBDLAddr12Len4Operand(Inst, Field, SystemZMC::GR64Regs); +} + static DecodeStatus decodeBDLAddr64Disp12Len8Operand(MCInst &Inst, uint64_t Field, uint64_t Address, diff --git a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp index 1207c7b..6cd12e1 100644 --- a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp +++ b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp @@ -1,4 +1,4 @@ -//===-- SystemZInstPrinter.cpp - Convert SystemZ MCInst to assembly syntax ===// +//===- SystemZInstPrinter.cpp - Convert SystemZ MCInst to assembly syntax -===// // // The LLVM Compiler Infrastructure // @@ -10,10 +10,13 @@ #include "SystemZInstPrinter.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" -#include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCSymbol.h" +#include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" +#include <cassert> +#include <cstdint> using namespace llvm; diff --git a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h index 6336f5e..d65c661 100644 --- a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h +++ b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h @@ -15,8 +15,10 @@ #define LLVM_LIB_TARGET_SYSTEMZ_INSTPRINTER_SYSTEMZINSTPRINTER_H #include "llvm/MC/MCInstPrinter.h" +#include <cstdint> namespace llvm { + class MCOperand; class SystemZInstPrinter : public MCInstPrinter { @@ -70,6 +72,7 @@ private: // This forms part of the instruction name rather than the operand list. 
void printCond4Operand(const MCInst *MI, int OpNum, raw_ostream &O); }; + } // end namespace llvm -#endif +#endif // LLVM_LIB_TARGET_SYSTEMZ_INSTPRINTER_SYSTEMZINSTPRINTER_H diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp index 9192448..51ac410 100644 --- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp +++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp @@ -7,8 +7,8 @@ // //===----------------------------------------------------------------------===// -#include "MCTargetDesc/SystemZMCTargetDesc.h" #include "MCTargetDesc/SystemZMCFixups.h" +#include "MCTargetDesc/SystemZMCTargetDesc.h" #include "llvm/MC/MCAsmBackend.h" #include "llvm/MC/MCELFObjectWriter.h" #include "llvm/MC/MCFixupKindInfo.h" @@ -50,8 +50,9 @@ public: return SystemZ::NumTargetFixupKinds; } const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override; - void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, - uint64_t Value, bool IsPCRel) const override; + void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, + const MCValue &Target, MutableArrayRef<char> Data, + uint64_t Value, bool IsResolved) const override; bool mayNeedRelaxation(const MCInst &Inst) const override { return false; } @@ -89,15 +90,17 @@ SystemZMCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { return Infos[Kind - FirstTargetFixupKind]; } -void SystemZMCAsmBackend::applyFixup(const MCFixup &Fixup, char *Data, - unsigned DataSize, uint64_t Value, - bool IsPCRel) const { +void SystemZMCAsmBackend::applyFixup(const MCAssembler &Asm, + const MCFixup &Fixup, + const MCValue &Target, + MutableArrayRef<char> Data, uint64_t Value, + bool IsResolved) const { MCFixupKind Kind = Fixup.getKind(); unsigned Offset = Fixup.getOffset(); unsigned BitSize = getFixupKindInfo(Kind).TargetSize; unsigned Size = (BitSize + 7) / 8; - assert(Offset + Size <= DataSize && "Invalid fixup offset!"); + assert(Offset + Size <= Data.size() && "Invalid fixup offset!"); // Big-endian insertion of Size bytes. 
Value = extractBitsForFixup(Kind, Value); diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp index b17977d..6e00981 100644 --- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp +++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp @@ -14,7 +14,7 @@ using namespace llvm; SystemZMCAsmInfo::SystemZMCAsmInfo(const Triple &TT) { - PointerSize = 8; + CodePointerSize = 8; CalleeSaveStackSlotSize = 8; IsLittleEndian = false; diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp index 7082aba..d188f56 100644 --- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp +++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp @@ -11,20 +11,28 @@ // //===----------------------------------------------------------------------===// -#include "MCTargetDesc/SystemZMCTargetDesc.h" #include "MCTargetDesc/SystemZMCFixups.h" +#include "MCTargetDesc/SystemZMCTargetDesc.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/MC/MCCodeEmitter.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCFixup.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include <cassert> +#include <cstdint> using namespace llvm; #define DEBUG_TYPE "mccodeemitter" namespace { + class SystemZMCCodeEmitter : public MCCodeEmitter { const MCInstrInfo &MCII; MCContext &Ctx; @@ -34,7 +42,7 @@ public: : MCII(mcii), Ctx(ctx) { } - ~SystemZMCCodeEmitter() override {} + ~SystemZMCCodeEmitter() override = default; // OVerride MCCodeEmitter. 
void encodeInstruction(const MCInst &MI, raw_ostream &OS, @@ -69,6 +77,9 @@ private: uint64_t getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const; + uint64_t getBDLAddr12Len4Encoding(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const; uint64_t getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const; @@ -137,13 +148,8 @@ private: void verifyInstructionPredicates(const MCInst &MI, uint64_t AvailableFeatures) const; }; -} // end anonymous namespace -MCCodeEmitter *llvm::createSystemZMCCodeEmitter(const MCInstrInfo &MCII, - const MCRegisterInfo &MRI, - MCContext &Ctx) { - return new SystemZMCCodeEmitter(MCII, Ctx); -} +} // end anonymous namespace void SystemZMCCodeEmitter:: encodeInstruction(const MCInst &MI, raw_ostream &OS, @@ -217,6 +223,17 @@ getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum, } uint64_t SystemZMCCodeEmitter:: +getBDLAddr12Len4Encoding(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups, + const MCSubtargetInfo &STI) const { + uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI); + uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI); + uint64_t Len = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups, STI) - 1; + assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<4>(Len)); + return (Len << 16) | (Base << 12) | Disp; +} + +uint64_t SystemZMCCodeEmitter:: getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum, SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const { @@ -282,3 +299,9 @@ SystemZMCCodeEmitter::getPCRelEncoding(const MCInst &MI, unsigned OpNum, #define ENABLE_INSTR_PREDICATE_VERIFIER #include "SystemZGenMCCodeEmitter.inc" + +MCCodeEmitter *llvm::createSystemZMCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + MCContext &Ctx) { + return new SystemZMCCodeEmitter(MCII, Ctx); +} diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp index 43a96e8..df0a816 100644 --- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp +++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp @@ -7,35 +7,38 @@ // //===----------------------------------------------------------------------===// -#include "MCTargetDesc/SystemZMCTargetDesc.h" #include "MCTargetDesc/SystemZMCFixups.h" +#include "MCTargetDesc/SystemZMCTargetDesc.h" +#include "llvm/BinaryFormat/ELF.h" #include "llvm/MC/MCELFObjectWriter.h" #include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCFixup.h" #include "llvm/MC/MCValue.h" +#include "llvm/Support/ErrorHandling.h" +#include <cassert> +#include <cstdint> using namespace llvm; namespace { + class SystemZObjectWriter : public MCELFObjectTargetWriter { public: SystemZObjectWriter(uint8_t OSABI); - - ~SystemZObjectWriter() override; + ~SystemZObjectWriter() override = default; protected: // Override MCELFObjectTargetWriter. unsigned getRelocType(MCContext &Ctx, const MCValue &Target, const MCFixup &Fixup, bool IsPCRel) const override; }; + } // end anonymous namespace SystemZObjectWriter::SystemZObjectWriter(uint8_t OSABI) : MCELFObjectTargetWriter(/*Is64Bit=*/true, OSABI, ELF::EM_S390, /*HasRelocationAddend=*/ true) {} -SystemZObjectWriter::~SystemZObjectWriter() { -} - // Return the relocation type for an absolute value of MCFixupKind Kind. 
static unsigned getAbsoluteReloc(unsigned Kind) { switch (Kind) { diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp index dfea7e3..727ab92 100644 --- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp +++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp @@ -116,6 +116,13 @@ const unsigned SystemZMC::AR32Regs[16] = { SystemZ::A12, SystemZ::A13, SystemZ::A14, SystemZ::A15 }; +const unsigned SystemZMC::CR64Regs[16] = { + SystemZ::C0, SystemZ::C1, SystemZ::C2, SystemZ::C3, + SystemZ::C4, SystemZ::C5, SystemZ::C6, SystemZ::C7, + SystemZ::C8, SystemZ::C9, SystemZ::C10, SystemZ::C11, + SystemZ::C12, SystemZ::C13, SystemZ::C14, SystemZ::C15 +}; + unsigned SystemZMC::getFirstReg(unsigned Reg) { static unsigned Map[SystemZ::NUM_TARGET_REGS]; static bool Initialized = false; diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h index d9926c7..dbca348 100644 --- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h +++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h @@ -55,6 +55,7 @@ extern const unsigned VR32Regs[32]; extern const unsigned VR64Regs[32]; extern const unsigned VR128Regs[32]; extern const unsigned AR32Regs[16]; +extern const unsigned CR64Regs[16]; // Return the 0-based number of the first architectural register that // contains the given LLVM register. E.g. R1D -> 1. diff --git a/contrib/llvm/lib/Target/SystemZ/README.txt b/contrib/llvm/lib/Target/SystemZ/README.txt index 86a1322..9b71415 100644 --- a/contrib/llvm/lib/Target/SystemZ/README.txt +++ b/contrib/llvm/lib/Target/SystemZ/README.txt @@ -63,7 +63,12 @@ via a register.) -- -We don't use ICM or STCM. +We don't use ICM, STCM, or CLM. + +-- + +We don't use ADD (LOGICAL) HIGH, SUBTRACT (LOGICAL) HIGH, +or COMPARE (LOGICAL) HIGH yet. 
-- diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.td b/contrib/llvm/lib/Target/SystemZ/SystemZ.td index 6bdfd4d..41300a1 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZ.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZ.td @@ -54,6 +54,9 @@ include "SystemZInstrFormats.td" include "SystemZInstrInfo.td" include "SystemZInstrVector.td" include "SystemZInstrFP.td" +include "SystemZInstrHFP.td" +include "SystemZInstrDFP.td" +include "SystemZInstrSystem.td" def SystemZInstrInfo : InstrInfo {} diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp index b4c843f..d70f9e9 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp @@ -13,15 +13,23 @@ // //===----------------------------------------------------------------------===// +#include "SystemZ.h" +#include "SystemZInstrInfo.h" #include "SystemZTargetMachine.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/IR/Function.h" -#include "llvm/Support/MathExtras.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/Target/TargetMachine.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/MC/MCInstrDesc.h" #include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/Target/TargetSubtargetInfo.h" +#include <cassert> +#include <cstdint> using namespace llvm; @@ -33,11 +41,11 @@ STATISTIC(EliminatedComparisons, "Number of eliminated comparisons"); STATISTIC(FusedComparisons, "Number of fused compare-and-branch instructions"); namespace { + // Represents the references to a particular register in one or more // instructions. struct Reference { - Reference() - : Def(false), Use(false) {} + Reference() = default; Reference &operator|=(const Reference &Other) { Def |= Other.Def; @@ -49,15 +57,16 @@ struct Reference { // True if the register is defined or used in some form, either directly or // via a sub- or super-register. - bool Def; - bool Use; + bool Def = false; + bool Use = false; }; class SystemZElimCompare : public MachineFunctionPass { public: static char ID; + SystemZElimCompare(const SystemZTargetMachine &tm) - : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr) {} + : MachineFunctionPass(ID) {} StringRef getPassName() const override { return "SystemZ Comparison Elimination"; @@ -65,6 +74,7 @@ public: bool processBlock(MachineBasicBlock &MBB); bool runOnMachineFunction(MachineFunction &F) override; + MachineFunctionProperties getRequiredProperties() const override { return MachineFunctionProperties().set( MachineFunctionProperties::Property::NoVRegs); @@ -84,16 +94,13 @@ private: bool fuseCompareOperations(MachineInstr &Compare, SmallVectorImpl<MachineInstr *> &CCUsers); - const SystemZInstrInfo *TII; - const TargetRegisterInfo *TRI; + const SystemZInstrInfo *TII = nullptr; + const TargetRegisterInfo *TRI = nullptr; }; char SystemZElimCompare::ID = 0; -} // end anonymous namespace -FunctionPass *llvm::createSystemZElimComparePass(SystemZTargetMachine &TM) { - return new SystemZElimCompare(TM); -} +} // end anonymous namespace // Return true if CC is live out of MBB. 
static bool isCCLiveOut(MachineBasicBlock &MBB) { @@ -167,7 +174,7 @@ static unsigned getCompareSourceReg(MachineInstr &Compare) { reg = Compare.getOperand(0).getReg(); else if (isLoadAndTestAsCmp(Compare)) reg = Compare.getOperand(1).getReg(); - assert (reg); + assert(reg); return reg; } @@ -216,9 +223,7 @@ bool SystemZElimCompare::convertToBRCT( Branch->RemoveOperand(0); Branch->setDesc(TII->get(BRCT)); MachineInstrBuilder MIB(*Branch->getParent()->getParent(), Branch); - MIB.addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)) - .addOperand(Target); + MIB.add(MI.getOperand(0)).add(MI.getOperand(1)).add(Target); // Add a CC def to BRCT(G), since we may have to split them again if the // branch displacement overflows. BRCTH has a 32-bit displacement, so // this is not necessary there. @@ -261,10 +266,10 @@ bool SystemZElimCompare::convertToLoadAndTrap( Branch->RemoveOperand(0); Branch->setDesc(TII->get(LATOpcode)); MachineInstrBuilder(*Branch->getParent()->getParent(), Branch) - .addOperand(MI.getOperand(0)) - .addOperand(MI.getOperand(1)) - .addOperand(MI.getOperand(2)) - .addOperand(MI.getOperand(3)); + .add(MI.getOperand(0)) + .add(MI.getOperand(1)) + .add(MI.getOperand(2)) + .add(MI.getOperand(3)); MI.eraseFromParent(); return true; } @@ -368,10 +373,8 @@ static bool isCompareZero(MachineInstr &Compare) { return true; default: - if (isLoadAndTestAsCmp(Compare)) return true; - return Compare.getNumExplicitOperands() == 2 && Compare.getOperand(1).isImm() && Compare.getOperand(1).getImm() == 0; } @@ -502,15 +505,15 @@ bool SystemZElimCompare::fuseCompareOperations( Branch->setDesc(TII->get(FusedOpcode)); MachineInstrBuilder MIB(*Branch->getParent()->getParent(), Branch); for (unsigned I = 0; I < SrcNOps; I++) - MIB.addOperand(Compare.getOperand(I)); - MIB.addOperand(CCMask); + MIB.add(Compare.getOperand(I)); + MIB.add(CCMask); if (Type == SystemZII::CompareAndBranch) { // Only conditional branches define CC, as they may be converted back // to a non-fused branch because of a long displacement. Conditional // returns don't have that problem. 
- MIB.addOperand(Target) - .addReg(SystemZ::CC, RegState::ImplicitDefine | RegState::Dead); + MIB.add(Target).addReg(SystemZ::CC, + RegState::ImplicitDefine | RegState::Dead); } if (Type == SystemZII::CompareAndSibcall) @@ -573,3 +576,7 @@ bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) { return Changed; } + +FunctionPass *llvm::createSystemZElimComparePass(SystemZTargetMachine &TM) { + return new SystemZElimCompare(TM); +} diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZExpandPseudo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZExpandPseudo.cpp index 92ce808..d02db9a 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZExpandPseudo.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZExpandPseudo.cpp @@ -74,7 +74,7 @@ bool SystemZExpandPseudo::expandLOCRMux(MachineBasicBlock &MBB, unsigned CCValid = MI.getOperand(3).getImm(); unsigned CCMask = MI.getOperand(4).getImm(); - LivePhysRegs LiveRegs(&TII->getRegisterInfo()); + LivePhysRegs LiveRegs(TII->getRegisterInfo()); LiveRegs.addLiveOuts(MBB); for (auto I = std::prev(MBB.end()); I != MBBI; --I) LiveRegs.stepBackward(*I); diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFeatures.td b/contrib/llvm/lib/Target/SystemZ/SystemZFeatures.td index 716e5ad..fda9c30 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZFeatures.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZFeatures.td @@ -68,6 +68,21 @@ def FeaturePopulationCount : SystemZFeature< "Assume that the population-count facility is installed" >; +def FeatureMessageSecurityAssist3 : SystemZFeature< + "message-security-assist-extension3", "MessageSecurityAssist3", + "Assume that the message-security-assist extension facility 3 is installed" +>; + +def FeatureMessageSecurityAssist4 : SystemZFeature< + "message-security-assist-extension4", "MessageSecurityAssist4", + "Assume that the message-security-assist extension facility 4 is installed" +>; + +def FeatureResetReferenceBitsMultiple : SystemZFeature< + "reset-reference-bits-multiple", "ResetReferenceBitsMultiple", + "Assume that the reset-reference-bits-multiple facility is installed" +>; + def Arch9NewFeatures : SystemZFeatureList<[ FeatureDistinctOps, FeatureFastSerialization, @@ -75,7 +90,10 @@ def Arch9NewFeatures : SystemZFeatureList<[ FeatureHighWord, FeatureInterlockedAccess1, FeatureLoadStoreOnCond, - FeaturePopulationCount + FeaturePopulationCount, + FeatureMessageSecurityAssist3, + FeatureMessageSecurityAssist4, + FeatureResetReferenceBitsMultiple ]>; //===----------------------------------------------------------------------===// @@ -109,12 +127,24 @@ def FeatureTransactionalExecution : SystemZFeature< "Assume that the transactional-execution facility is installed" >; +def FeatureDFPZonedConversion : SystemZFeature< + "dfp-zoned-conversion", "DFPZonedConversion", + "Assume that the DFP zoned-conversion facility is installed" +>; + +def FeatureEnhancedDAT2 : SystemZFeature< + "enhanced-dat-2", "EnhancedDAT2", + "Assume that the enhanced-DAT facility 2 is installed" +>; + def Arch10NewFeatures : SystemZFeatureList<[ FeatureExecutionHint, FeatureLoadAndTrap, FeatureMiscellaneousExtensions, FeatureProcessorAssist, - FeatureTransactionalExecution + FeatureTransactionalExecution, + FeatureDFPZonedConversion, + FeatureEnhancedDAT2 ]>; //===----------------------------------------------------------------------===// @@ -133,6 +163,16 @@ def FeatureLoadStoreOnCond2 : SystemZFeature< "Assume that the load/store-on-condition facility 2 is installed" >; +def FeatureMessageSecurityAssist5 : SystemZFeature< + 
"message-security-assist-extension5", "MessageSecurityAssist5", + "Assume that the message-security-assist extension facility 5 is installed" +>; + +def FeatureDFPPackedConversion : SystemZFeature< + "dfp-packed-conversion", "DFPPackedConversion", + "Assume that the DFP packed-conversion facility is installed" +>; + def FeatureVector : SystemZFeature< "vector", "Vector", "Assume that the vectory facility is installed" @@ -142,11 +182,65 @@ def FeatureNoVector : SystemZMissingFeature<"Vector">; def Arch11NewFeatures : SystemZFeatureList<[ FeatureLoadAndZeroRightmostByte, FeatureLoadStoreOnCond2, + FeatureMessageSecurityAssist5, + FeatureDFPPackedConversion, FeatureVector ]>; //===----------------------------------------------------------------------===// // +// New features added in the Twelvth Edition of the z/Architecture +// +//===----------------------------------------------------------------------===// + +def FeatureMiscellaneousExtensions2 : SystemZFeature< + "miscellaneous-extensions-2", "MiscellaneousExtensions2", + "Assume that the miscellaneous-extensions facility 2 is installed" +>; + +def FeatureGuardedStorage : SystemZFeature< + "guarded-storage", "GuardedStorage", + "Assume that the guarded-storage facility is installed" +>; + +def FeatureMessageSecurityAssist7 : SystemZFeature< + "message-security-assist-extension7", "MessageSecurityAssist7", + "Assume that the message-security-assist extension facility 7 is installed" +>; + +def FeatureMessageSecurityAssist8 : SystemZFeature< + "message-security-assist-extension8", "MessageSecurityAssist8", + "Assume that the message-security-assist extension facility 8 is installed" +>; + +def FeatureVectorEnhancements1 : SystemZFeature< + "vector-enhancements-1", "VectorEnhancements1", + "Assume that the vector enhancements facility 1 is installed" +>; +def FeatureNoVectorEnhancements1 : SystemZMissingFeature<"VectorEnhancements1">; + +def FeatureVectorPackedDecimal : SystemZFeature< + "vector-packed-decimal", "VectorPackedDecimal", + "Assume that the vector packed decimal facility is installed" +>; + +def FeatureInsertReferenceBitsMultiple : SystemZFeature< + "insert-reference-bits-multiple", "InsertReferenceBitsMultiple", + "Assume that the insert-reference-bits-multiple facility is installed" +>; + +def Arch12NewFeatures : SystemZFeatureList<[ + FeatureMiscellaneousExtensions2, + FeatureGuardedStorage, + FeatureMessageSecurityAssist7, + FeatureMessageSecurityAssist8, + FeatureVectorEnhancements1, + FeatureVectorPackedDecimal, + FeatureInsertReferenceBitsMultiple +]>; + +//===----------------------------------------------------------------------===// +// // Cumulative supported and unsupported feature sets // //===----------------------------------------------------------------------===// @@ -159,9 +253,13 @@ def Arch10SupportedFeatures : SystemZFeatureAdd<Arch9SupportedFeatures.List, Arch10NewFeatures.List>; def Arch11SupportedFeatures : SystemZFeatureAdd<Arch10SupportedFeatures.List, Arch11NewFeatures.List>; +def Arch12SupportedFeatures + : SystemZFeatureAdd<Arch11SupportedFeatures.List, Arch12NewFeatures.List>; -def Arch11UnsupportedFeatures +def Arch12UnsupportedFeatures : SystemZFeatureList<[]>; +def Arch11UnsupportedFeatures + : SystemZFeatureAdd<Arch12UnsupportedFeatures.List, Arch12NewFeatures.List>; def Arch10UnsupportedFeatures : SystemZFeatureAdd<Arch11UnsupportedFeatures.List, Arch11NewFeatures.List>; def Arch9UnsupportedFeatures diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp 
b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp index a28a91e..0cb2b5a 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp @@ -277,8 +277,21 @@ void SystemZFrameLowering:: processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const { MachineFrameInfo &MFFrame = MF.getFrameInfo(); - uint64_t MaxReach = (MFFrame.estimateStackSize(MF) + - SystemZMC::CallFrameSize * 2); + // Get the size of our stack frame to be allocated ... + uint64_t StackSize = (MFFrame.estimateStackSize(MF) + + SystemZMC::CallFrameSize); + // ... and the maximum offset we may need to reach into the + // caller's frame to access the save area or stack arguments. + int64_t MaxArgOffset = SystemZMC::CallFrameSize; + for (int I = MFFrame.getObjectIndexBegin(); I != 0; ++I) + if (MFFrame.getObjectOffset(I) >= 0) { + int64_t ArgOffset = SystemZMC::CallFrameSize + + MFFrame.getObjectOffset(I) + + MFFrame.getObjectSize(I); + MaxArgOffset = std::max(MaxArgOffset, ArgOffset); + } + + uint64_t MaxReach = StackSize + MaxArgOffset; if (!isUInt<12>(MaxReach)) { // We may need register scavenging slots if some parts of the frame // are outside the reach of an unsigned 12-bit displacement. diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp index fe4b52b..73a1036 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp @@ -26,7 +26,7 @@ using namespace llvm; -#define DEBUG_TYPE "misched" +#define DEBUG_TYPE "machine-scheduler" // This is the limit of processor resource usage at which the // scheduler should try to look for other instructions (not using the diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h b/contrib/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h index 8fa54ee..0c755c9 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h +++ b/contrib/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.h @@ -25,10 +25,10 @@ #define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZHAZARDRECOGNIZER_H #include "SystemZSubtarget.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineScheduler.h" #include "llvm/CodeGen/ScheduleHazardRecognizer.h" -#include "llvm/ADT/SmallVector.h" #include "llvm/MC/MCInstrDesc.h" #include "llvm/Support/raw_ostream.h" #include <string> diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp index 920b6e4..cd2f708 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -15,6 +15,7 @@ #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/KnownBits.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; @@ -711,9 +712,9 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op, // The inner check covers all cases but is more expensive. 
uint64_t Used = allOnes(Op.getValueSizeInBits()); if (Used != (AndMask | InsertMask)) { - APInt KnownZero, KnownOne; - CurDAG->computeKnownBits(Op.getOperand(0), KnownZero, KnownOne); - if (Used != (AndMask | InsertMask | KnownZero.getZExtValue())) + KnownBits Known; + CurDAG->computeKnownBits(Op.getOperand(0), Known); + if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue())) return false; } @@ -770,9 +771,9 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const { // If some bits of Input are already known zeros, those bits will have // been removed from the mask. See if adding them back in makes the // mask suitable. - APInt KnownZero, KnownOne; - CurDAG->computeKnownBits(Input, KnownZero, KnownOne); - Mask |= KnownZero.getZExtValue(); + KnownBits Known; + CurDAG->computeKnownBits(Input, Known); + Mask |= Known.Zero.getZExtValue(); if (!refineRxSBGMask(RxSBG, Mask)) return false; } @@ -794,9 +795,9 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const { // If some bits of Input are already known ones, those bits will have // been removed from the mask. See if adding them back in makes the // mask suitable. - APInt KnownZero, KnownOne; - CurDAG->computeKnownBits(Input, KnownZero, KnownOne); - Mask &= ~KnownOne.getZExtValue(); + KnownBits Known; + CurDAG->computeKnownBits(Input, Known); + Mask &= ~Known.One.getZExtValue(); if (!refineRxSBGMask(RxSBG, Mask)) return false; } diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp index 2d0a06a..2d916d2 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -20,8 +20,9 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" -#include "llvm/Support/CommandLine.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/KnownBits.h" #include <cctype> using namespace llvm; @@ -100,7 +101,10 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM, addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass); addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass); } - addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass); + if (Subtarget.hasVectorEnhancements1()) + addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass); + else + addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass); if (Subtarget.hasVector()) { addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass); @@ -194,6 +198,9 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM, setOperationAction(ISD::UMUL_LOHI, VT, Custom); // Only z196 and above have native support for conversions to unsigned. + // On z10, promoting to i64 doesn't generate an inexact condition for + // values that are outside the i32 range but in the i64 range, so use + // the default expansion. 
if (!Subtarget.hasFPExtension()) setOperationAction(ISD::FP_TO_UINT, VT, Expand); } @@ -312,7 +319,10 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM, setOperationAction(ISD::AND, VT, Legal); setOperationAction(ISD::OR, VT, Legal); setOperationAction(ISD::XOR, VT, Legal); - setOperationAction(ISD::CTPOP, VT, Custom); + if (Subtarget.hasVectorEnhancements1()) + setOperationAction(ISD::CTPOP, VT, Legal); + else + setOperationAction(ISD::CTPOP, VT, Custom); setOperationAction(ISD::CTTZ, VT, Legal); setOperationAction(ISD::CTLZ, VT, Legal); @@ -344,9 +354,13 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM, // There should be no need to check for float types other than v2f64 // since <2 x f32> isn't a legal type. setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal); + setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal); setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal); + setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal); setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal); setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal); } // Handle floating-point types. @@ -406,10 +420,60 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM, setOperationAction(ISD::FROUND, MVT::v2f64, Legal); } + // The vector enhancements facility 1 has instructions for these. + if (Subtarget.hasVectorEnhancements1()) { + setOperationAction(ISD::FADD, MVT::v4f32, Legal); + setOperationAction(ISD::FNEG, MVT::v4f32, Legal); + setOperationAction(ISD::FSUB, MVT::v4f32, Legal); + setOperationAction(ISD::FMUL, MVT::v4f32, Legal); + setOperationAction(ISD::FMA, MVT::v4f32, Legal); + setOperationAction(ISD::FDIV, MVT::v4f32, Legal); + setOperationAction(ISD::FABS, MVT::v4f32, Legal); + setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); + setOperationAction(ISD::FRINT, MVT::v4f32, Legal); + setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); + setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); + setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); + setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); + setOperationAction(ISD::FROUND, MVT::v4f32, Legal); + + setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); + setOperationAction(ISD::FMAXNAN, MVT::f64, Legal); + setOperationAction(ISD::FMINNUM, MVT::f64, Legal); + setOperationAction(ISD::FMINNAN, MVT::f64, Legal); + + setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal); + setOperationAction(ISD::FMAXNAN, MVT::v2f64, Legal); + setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal); + setOperationAction(ISD::FMINNAN, MVT::v2f64, Legal); + + setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); + setOperationAction(ISD::FMAXNAN, MVT::f32, Legal); + setOperationAction(ISD::FMINNUM, MVT::f32, Legal); + setOperationAction(ISD::FMINNAN, MVT::f32, Legal); + + setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); + setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal); + setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); + setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal); + + setOperationAction(ISD::FMAXNUM, MVT::f128, Legal); + setOperationAction(ISD::FMAXNAN, MVT::f128, Legal); + setOperationAction(ISD::FMINNUM, MVT::f128, Legal); + setOperationAction(ISD::FMINNAN, MVT::f128, Legal); + } + // We have fused multiply-addition for f32 and f64 but not f128. 
setOperationAction(ISD::FMA, MVT::f32, Legal); setOperationAction(ISD::FMA, MVT::f64, Legal); - setOperationAction(ISD::FMA, MVT::f128, Expand); + if (Subtarget.hasVectorEnhancements1()) + setOperationAction(ISD::FMA, MVT::f128, Legal); + else + setOperationAction(ISD::FMA, MVT::f128, Expand); + + // We don't have a copysign instruction on vector registers. + if (Subtarget.hasVectorEnhancements1()) + setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand); // Needed so that we don't try to implement f128 constant loads using // a load-and-extend of a f80 constant (in cases where the constant @@ -417,6 +481,12 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM, for (MVT VT : MVT::fp_valuetypes()) setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand); + // We don't have extending load instruction on vector registers. + if (Subtarget.hasVectorEnhancements1()) { + setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand); + setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand); + } + // Floating-point truncation and stores need to be done separately. setTruncStoreAction(MVT::f64, MVT::f32, Expand); setTruncStoreAction(MVT::f128, MVT::f32, Expand); @@ -481,7 +551,7 @@ bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { case MVT::f64: return true; case MVT::f128: - return false; + return Subtarget.hasVectorEnhancements1(); default: break; } @@ -822,7 +892,7 @@ bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType, return isTruncateFree(FromType, ToType); } -bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { +bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { return CI->isTailCall(); } @@ -1102,9 +1172,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI, // Mark the start of the call. if (!IsTailCall) - Chain = DAG.getCALLSEQ_START(Chain, - DAG.getConstant(NumBytes, DL, PtrVT, true), - DL); + Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL); // Copy argument values to their designated locations. SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass; @@ -1316,11 +1384,6 @@ SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps); } -SDValue SystemZTargetLowering::prepareVolatileOrAtomicLoad( - SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const { - return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain); -} - // Return true if Op is an intrinsic node with chain that returns the CC value // as its only (other) argument. Provide the associated SystemZISD opcode and // the mask of valid CC values if so. 
@@ -1461,21 +1524,25 @@ static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) { return true; case Intrinsic::s390_vfcedbs: + case Intrinsic::s390_vfcesbs: Opcode = SystemZISD::VFCMPES; CCValid = SystemZ::CCMASK_VCMP; return true; case Intrinsic::s390_vfchdbs: + case Intrinsic::s390_vfchsbs: Opcode = SystemZISD::VFCMPHS; CCValid = SystemZ::CCMASK_VCMP; return true; case Intrinsic::s390_vfchedbs: + case Intrinsic::s390_vfchesbs: Opcode = SystemZISD::VFCMPHES; CCValid = SystemZ::CCMASK_VCMP; return true; case Intrinsic::s390_vftcidb: + case Intrinsic::s390_vftcisb: Opcode = SystemZISD::VFTCI; CCValid = SystemZ::CCMASK_VCMP; return true; @@ -2053,6 +2120,7 @@ static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, if (NewC.ICmpType != SystemZICMP::SignedOnly && NewC.Op0.getOpcode() == ISD::SHL && isSimpleShift(NewC.Op0, ShiftVal) && + (MaskVal >> ShiftVal != 0) && (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal >> ShiftVal, CmpVal >> ShiftVal, @@ -2062,6 +2130,7 @@ static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, } else if (NewC.ICmpType != SystemZICMP::SignedOnly && NewC.Op0.getOpcode() == ISD::SRL && isSimpleShift(NewC.Op0, ShiftVal) && + (MaskVal << ShiftVal != 0) && (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal << ShiftVal, CmpVal << ShiftVal, @@ -2221,15 +2290,12 @@ static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, // Lower a binary operation that produces two VT results, one in each // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, -// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation -// on the extended Op0 and (unextended) Op1. Store the even register result +// and Opcode performs the GR128 operation. Store the even register result // in Even and the odd register result in Odd. static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, - unsigned Extend, unsigned Opcode, SDValue Op0, - SDValue Op1, SDValue &Even, SDValue &Odd) { - SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0); - SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, - SDValue(In128, 0), Op1); + unsigned Opcode, SDValue Op0, SDValue Op1, + SDValue &Even, SDValue &Odd) { + SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1); bool Is32Bit = is32Bit(VT); Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); @@ -2316,11 +2382,15 @@ static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, // Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode, // producing a result of type VT. -static SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &DL, - EVT VT, SDValue CmpOp0, SDValue CmpOp1) { - // There is no hardware support for v4f32, so extend the vector into - // two v2f64s and compare those. - if (CmpOp0.getValueType() == MVT::v4f32) { +SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode, + const SDLoc &DL, EVT VT, + SDValue CmpOp0, + SDValue CmpOp1) const { + // There is no hardware support for v4f32 (unless we have the vector + // enhancements facility 1), so extend the vector into two v2f64s + // and compare those. 
+ if (CmpOp0.getValueType() == MVT::v4f32 && + !Subtarget.hasVectorEnhancements1()) { SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0); SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0); SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1); @@ -2334,9 +2404,11 @@ static SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &DL, // Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing // an integer mask of type VT. -static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT, - ISD::CondCode CC, SDValue CmpOp0, - SDValue CmpOp1) { +SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG, + const SDLoc &DL, EVT VT, + ISD::CondCode CC, + SDValue CmpOp0, + SDValue CmpOp1) const { bool IsFP = CmpOp0.getValueType().isFloatingPoint(); bool Invert = false; SDValue Cmp; @@ -2344,6 +2416,7 @@ static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT, // Handle tests for order using (or (ogt y x) (oge x y)). case ISD::SETUO: Invert = true; + LLVM_FALLTHROUGH; case ISD::SETO: { assert(IsFP && "Unexpected integer comparison"); SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); @@ -2355,6 +2428,7 @@ static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT, // Handle <> tests using (or (ogt y x) (ogt x y)). case ISD::SETUEQ: Invert = true; + LLVM_FALLTHROUGH; case ISD::SETONE: { assert(IsFP && "Unexpected integer comparison"); SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0); @@ -2789,8 +2863,9 @@ SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, // but we need this case for bitcasts that are created during lowering // and which are then lowered themselves. if (auto *LoadN = dyn_cast<LoadSDNode>(In)) - return DAG.getLoad(ResVT, DL, LoadN->getChain(), LoadN->getBasePtr(), - LoadN->getMemOperand()); + if (ISD::isNormalLoad(LoadN)) + return DAG.getLoad(ResVT, DL, LoadN->getChain(), LoadN->getBasePtr(), + LoadN->getMemOperand()); if (InVT == MVT::i32 && ResVT == MVT::f32) { SDValue In64; @@ -2957,8 +3032,14 @@ SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, // We define this so that it can be used for constant division. lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); + else if (Subtarget.hasMiscellaneousExtensions2()) + // SystemZISD::SMUL_LOHI returns the low result in the odd register and + // the high result in the even register. ISD::SMUL_LOHI is defined to + // return the low half first, so the results are in reverse order. + lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI, + Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); else { - // Do a full 128-bit multiplication based on UMUL_LOHI64: + // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI: // // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) // @@ -2976,10 +3057,10 @@ SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, SDValue RL = Op.getOperand(1); SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); - // UMUL_LOHI64 returns the low result in the odd register and the high - // result in the even register. SMUL_LOHI is defined to return the - // low half first, so the results are in reverse order. - lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, + // SystemZISD::UMUL_LOHI returns the low result in the odd register and + // the high result in the even register. 
ISD::SMUL_LOHI is defined to + // return the low half first, so the results are in reverse order. + lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, LL, RL, Ops[1], Ops[0]); SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); @@ -3000,10 +3081,10 @@ SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); else - // UMUL_LOHI64 returns the low result in the odd register and the high - // result in the even register. UMUL_LOHI is defined to return the - // low half first, so the results are in reverse order. - lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, + // SystemZISD::UMUL_LOHI returns the low result in the odd register and + // the high result in the even register. ISD::UMUL_LOHI is defined to + // return the low half first, so the results are in reverse order. + lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI, Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); return DAG.getMergeValues(Ops, DL); } @@ -3014,24 +3095,19 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, SDValue Op1 = Op.getOperand(1); EVT VT = Op.getValueType(); SDLoc DL(Op); - unsigned Opcode; - // We use DSGF for 32-bit division. - if (is32Bit(VT)) { + // We use DSGF for 32-bit division. This means the first operand must + // always be 64-bit, and the second operand should be 32-bit whenever + // that is possible, to improve performance. + if (is32Bit(VT)) Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); - Opcode = SystemZISD::SDIVREM32; - } else if (DAG.ComputeNumSignBits(Op1) > 32) { + else if (DAG.ComputeNumSignBits(Op1) > 32) Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); - Opcode = SystemZISD::SDIVREM32; - } else - Opcode = SystemZISD::SDIVREM64; - // DSG(F) takes a 64-bit dividend, so the even register in the GR128 - // input is "don't care". The instruction returns the remainder in - // the even register and the quotient in the odd register. + // DSG(F) returns the remainder in the even register and the + // quotient in the odd register. SDValue Ops[2]; - lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, - Op0, Op1, Ops[1], Ops[0]); + lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]); return DAG.getMergeValues(Ops, DL); } @@ -3040,16 +3116,11 @@ SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, EVT VT = Op.getValueType(); SDLoc DL(Op); - // DL(G) uses a double-width dividend, so we need to clear the even - // register in the GR128 input. The instruction returns the remainder - // in the even register and the quotient in the odd register. + // DL(G) returns the remainder in the even register and the + // quotient in the odd register. SDValue Ops[2]; - if (is32Bit(VT)) - lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, - Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); - else - lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, - Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); + lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM, + Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); return DAG.getMergeValues(Ops, DL); } @@ -3058,14 +3129,14 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { // Get the known-zero masks for each operand. 
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; - APInt KnownZero[2], KnownOne[2]; - DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]); - DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]); + KnownBits Known[2]; + DAG.computeKnownBits(Ops[0], Known[0]); + DAG.computeKnownBits(Ops[1], Known[1]); // See if the upper 32 bits of one operand and the lower 32 bits of the // other are known zero. They are the low and high operands respectively. - uint64_t Masks[] = { KnownZero[0].getZExtValue(), - KnownZero[1].getZExtValue() }; + uint64_t Masks[] = { Known[0].Zero.getZExtValue(), + Known[1].Zero.getZExtValue() }; unsigned High, Low; if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) High = 1, Low = 0; @@ -3150,9 +3221,9 @@ SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op, } // Get the known-zero mask for the operand. - APInt KnownZero, KnownOne; - DAG.computeKnownBits(Op, KnownZero, KnownOne); - unsigned NumSignificantBits = (~KnownZero).getActiveBits(); + KnownBits Known; + DAG.computeKnownBits(Op, Known); + unsigned NumSignificantBits = (~Known.Zero).getActiveBits(); if (NumSignificantBits == 0) return DAG.getConstant(0, DL, VT); @@ -3189,13 +3260,13 @@ SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op, SDLoc DL(Op); AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); - SynchronizationScope FenceScope = static_cast<SynchronizationScope>( + SyncScope::ID FenceSSID = static_cast<SyncScope::ID>( cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); // The only fence that needs an instruction is a sequentially-consistent // cross-thread fence. if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && - FenceScope == CrossThread) { + FenceSSID == SyncScope::System) { return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other, Op.getOperand(0)), 0); @@ -3205,12 +3276,15 @@ SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op, return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0)); } -// Op is an atomic load. Lower it into a normal volatile load. +// Op is an atomic load. Lower it into a serialization followed +// by a normal volatile load. SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const { auto *Node = cast<AtomicSDNode>(Op.getNode()); + SDValue Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), + MVT::Other, Node->getChain()), 0); return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), - Node->getChain(), Node->getBasePtr(), + Chain, Node->getBasePtr(), Node->getMemoryVT(), Node->getMemOperand()); } @@ -3802,7 +3876,7 @@ namespace { struct GeneralShuffle { GeneralShuffle(EVT vt) : VT(vt) {} void addUndef(); - void add(SDValue, unsigned); + bool add(SDValue, unsigned); SDValue getNode(SelectionDAG &, const SDLoc &); // The operands of the shuffle. @@ -3828,8 +3902,10 @@ void GeneralShuffle::addUndef() { // Add an extra element to the shuffle, taking it from element Elem of Op. // A null Op indicates a vector input whose value will be calculated later; // there is at most one such input per shuffle and it always has the same -// type as the result. -void GeneralShuffle::add(SDValue Op, unsigned Elem) { +// type as the result. Aborts and returns false if the source vector elements +// of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per +// LLVM they become implicitly extended, but this is rare and not optimized. 
+bool GeneralShuffle::add(SDValue Op, unsigned Elem) { unsigned BytesPerElement = VT.getVectorElementType().getStoreSize(); // The source vector can have wider elements than the result, @@ -3837,8 +3913,12 @@ void GeneralShuffle::add(SDValue Op, unsigned Elem) { // We want the least significant part. EVT FromVT = Op.getNode() ? Op.getValueType() : VT; unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize(); - assert(FromBytesPerElement >= BytesPerElement && - "Invalid EXTRACT_VECTOR_ELT"); + + // Return false if the source elements are smaller than their destination + // elements. + if (FromBytesPerElement < BytesPerElement) + return false; + unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes + (FromBytesPerElement - BytesPerElement)); @@ -3856,13 +3936,13 @@ void GeneralShuffle::add(SDValue Op, unsigned Elem) { break; if (NewByte < 0) { addUndef(); - return; + return true; } Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes); Byte = unsigned(NewByte) % SystemZ::VectorBytes; } else if (Op.isUndef()) { addUndef(); - return; + return true; } else break; } @@ -3879,6 +3959,8 @@ void GeneralShuffle::add(SDValue Op, unsigned Elem) { unsigned Base = OpNo * SystemZ::VectorBytes + Byte; for (unsigned I = 0; I < BytesPerElement; ++I) Bytes.push_back(Base + I); + + return true; } // Return SDNodes for the completed shuffle. @@ -4110,12 +4192,14 @@ static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && Op.getOperand(1).getOpcode() == ISD::Constant) { unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); - GS.add(Op.getOperand(0), Elem); + if (!GS.add(Op.getOperand(0), Elem)) + return SDValue(); FoundOne = true; } else if (Op.isUndef()) { GS.addUndef(); } else { - GS.add(SDValue(), ResidueOps.size()); + if (!GS.add(SDValue(), ResidueOps.size())) + return SDValue(); ResidueOps.push_back(BVN->getOperand(I)); } } @@ -4172,12 +4256,20 @@ static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, if (Single.getNode() && (Count > 1 || Single.getOpcode() == ISD::LOAD)) return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single); + // If all elements are loads, use VLREP/VLEs (below). + bool AllLoads = true; + for (auto Elem : Elems) + if (Elem.getOpcode() != ISD::LOAD || cast<LoadSDNode>(Elem)->isIndexed()) { + AllLoads = false; + break; + } + // The best way of building a v2i64 from two i64s is to use VLVGP. - if (VT == MVT::v2i64) + if (VT == MVT::v2i64 && !AllLoads) return joinDwords(DAG, DL, Elems[0], Elems[1]); // Use a 64-bit merge high to combine two doubles. - if (VT == MVT::v2f64) + if (VT == MVT::v2f64 && !AllLoads) return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); // Build v4f32 values directly from the FPRs: @@ -4187,7 +4279,7 @@ static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, // <ABxx> <CDxx> // V VMRHG // <ABCD> - if (VT == MVT::v4f32) { + if (VT == MVT::v4f32 && !AllLoads) { SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]); // Avoid unnecessary undefs by reusing the other operand. 
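The byte arithmetic in GeneralShuffle::add above is easier to see in isolation: SystemZ is big-endian, so the least significant part of a wider source element lives at the higher byte offset within that element. A minimal standalone sketch of the index calculation, assuming the 16-byte vector width (plain C++, not LLVM code; the function name is illustrative):

    // Standalone sketch of the byte index used by GeneralShuffle::add when the
    // source vector has wider elements than the result (big-endian lane order).
    #include <cstdio>

    constexpr unsigned VectorBytes = 16; // SystemZ::VectorBytes

    // Byte offset of the least significant BytesPerElement bytes of element
    // Elem, where the source elements are FromBytesPerElement wide.
    unsigned leastSignificantByte(unsigned Elem, unsigned FromBytesPerElement,
                                  unsigned BytesPerElement) {
      return (Elem * FromBytesPerElement) % VectorBytes +
             (FromBytesPerElement - BytesPerElement);
    }

    int main() {
      // Extracting an i32 from element 1 of a v2i64: that element occupies
      // bytes 8..15, and its low (least significant) 4 bytes are bytes 12..15.
      std::printf("%u\n", leastSignificantByte(1, 8, 4)); // prints 12
      return 0;
    }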
@@ -4229,23 +4321,37 @@ static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, Constants[I] = DAG.getUNDEF(Elems[I].getValueType()); Result = DAG.getBuildVector(VT, DL, Constants); } else { - // Otherwise try to use VLVGP to start the sequence in order to + // Otherwise try to use VLREP or VLVGP to start the sequence in order to // avoid a false dependency on any previous contents of the vector - // register. This only makes sense if one of the associated elements - // is defined. - unsigned I1 = NumElements / 2 - 1; - unsigned I2 = NumElements - 1; - bool Def1 = !Elems[I1].isUndef(); - bool Def2 = !Elems[I2].isUndef(); - if (Def1 || Def2) { - SDValue Elem1 = Elems[Def1 ? I1 : I2]; - SDValue Elem2 = Elems[Def2 ? I2 : I1]; - Result = DAG.getNode(ISD::BITCAST, DL, VT, - joinDwords(DAG, DL, Elem1, Elem2)); - Done[I1] = true; - Done[I2] = true; - } else - Result = DAG.getUNDEF(VT); + // register. + + // Use a VLREP if at least one element is a load. + unsigned LoadElIdx = UINT_MAX; + for (unsigned I = 0; I < NumElements; ++I) + if (Elems[I].getOpcode() == ISD::LOAD && + cast<LoadSDNode>(Elems[I])->isUnindexed()) { + LoadElIdx = I; + break; + } + if (LoadElIdx != UINT_MAX) { + Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, Elems[LoadElIdx]); + Done[LoadElIdx] = true; + } else { + // Try to use VLVGP. + unsigned I1 = NumElements / 2 - 1; + unsigned I2 = NumElements - 1; + bool Def1 = !Elems[I1].isUndef(); + bool Def2 = !Elems[I2].isUndef(); + if (Def1 || Def2) { + SDValue Elem1 = Elems[Def1 ? I1 : I2]; + SDValue Elem2 = Elems[Def2 ? I2 : I1]; + Result = DAG.getNode(ISD::BITCAST, DL, VT, + joinDwords(DAG, DL, Elem1, Elem2)); + Done[I1] = true; + Done[I2] = true; + } else + Result = DAG.getUNDEF(VT); + } } // Use VLVGx to insert the other elements. @@ -4354,9 +4460,9 @@ SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, int Elt = VSN->getMaskElt(I); if (Elt < 0) GS.addUndef(); - else - GS.add(Op.getOperand(unsigned(Elt) / NumElements), - unsigned(Elt) % NumElements); + else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements), + unsigned(Elt) % NumElements)) + return SDValue(); } return GS.getNode(DAG, SDLoc(VSN)); } @@ -4630,11 +4736,10 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { OPCODE(SELECT_CCMASK); OPCODE(ADJDYNALLOC); OPCODE(POPCNT); - OPCODE(UMUL_LOHI64); - OPCODE(SDIVREM32); - OPCODE(SDIVREM64); - OPCODE(UDIVREM32); - OPCODE(UDIVREM64); + OPCODE(SMUL_LOHI); + OPCODE(UMUL_LOHI); + OPCODE(SDIVREM); + OPCODE(UDIVREM); OPCODE(MVC); OPCODE(MVC_LOOP); OPCODE(NC); @@ -4649,7 +4754,6 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { OPCODE(STRCMP); OPCODE(SEARCH_STRING); OPCODE(IPM); - OPCODE(SERIALIZE); OPCODE(MEMBARRIER); OPCODE(TBEGIN); OPCODE(TBEGIN_NOFLOAT); @@ -4722,9 +4826,12 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { } // Return true if VT is a vector whose elements are a whole number of bytes -// in width. -static bool canTreatAsByteVector(EVT VT) { - return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0; +// in width. Also check for presence of vector support. 
+bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { + if (!Subtarget.hasVector()) + return false; + + return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple(); } // Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT @@ -4986,6 +5093,10 @@ SDValue SystemZTargetLowering::combineSTORE( SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT( SDNode *N, DAGCombinerInfo &DCI) const { + + if (!Subtarget.hasVector()) + return SDValue(); + // Try to simplify a vector extraction. if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) { SDValue Op0 = N->getOperand(0); @@ -5233,7 +5344,7 @@ static unsigned forceReg(MachineInstr &MI, MachineOperand &Base, unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) - .addOperand(Base) + .add(Base) .addImm(0) .addReg(0); return Reg; @@ -5321,9 +5432,24 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { if (Invert) CCMask ^= CCValid; + + // ISel pattern matching also adds a load memory operand of the same + // address, so take special care to find the storing memory operand. + MachineMemOperand *MMO = nullptr; + for (auto *I : MI.memoperands()) + if (I->isStore()) { + MMO = I; + break; + } + BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) - .addReg(SrcReg).addOperand(Base).addImm(Disp) - .addImm(CCValid).addImm(CCMask); + .addReg(SrcReg) + .add(Base) + .addImm(Disp) + .addImm(CCValid) + .addImm(CCMask) + .addMemOperand(MMO); + MI.eraseFromParent(); return MBB; } @@ -5350,7 +5476,10 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI, // # fallthrough to JoinMBB MBB = FalseMBB; BuildMI(MBB, DL, TII->get(StoreOpcode)) - .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); + .addReg(SrcReg) + .add(Base) + .addImm(Disp) + .addReg(IndexReg); MBB->addSuccessor(JoinMBB); MI.eraseFromParent(); @@ -5415,8 +5544,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( // %OrigVal = L Disp(%Base) // # fall through to LoopMMB MBB = StartMBB; - BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) - .addOperand(Base).addImm(Disp).addReg(0); + BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); MBB->addSuccessor(LoopMBB); // LoopMBB: @@ -5437,8 +5565,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( if (Invert) { // Perform the operation normally and then invert every bit of the field. unsigned Tmp = MRI.createVirtualRegister(RC); - BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) - .addReg(RotatedOldVal).addOperand(Src2); + BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2); if (BitSize <= 32) // XILF with the upper BitSize bits set. BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) @@ -5454,7 +5581,8 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( } else if (BinOpcode) // A simply binary operation. BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) - .addReg(RotatedOldVal).addOperand(Src2); + .addReg(RotatedOldVal) + .add(Src2); else if (IsSubWord) // Use RISBG to rotate Src2 into position and use it to replace the // field in RotatedOldVal. 
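For readers new to these expansions, the control flow that emitAtomicLoadBinary builds (begun above and completed in the next hunk) has the familiar shape of a compare-and-swap retry loop: load the original value once, compute a new value from the last observed old value, and retry while CS fails. A rough C++ analogue using std::atomic, for illustration only; the real expansion additionally rotates sub-word fields into position with RLL:

    // C++ analogue of an ATOMIC_LOADW_* expansion: one initial load, then a
    // compare-and-swap retry loop.  Not the emitted MachineIR.
    #include <atomic>
    #include <cstdint>

    uint32_t atomicAndOld(std::atomic<uint32_t> &Mem, uint32_t Src2) {
      uint32_t OldVal = Mem.load();       // StartMBB: %OrigVal = L Disp(%Base)
      uint32_t NewVal;
      do {                                // LoopMBB:
        NewVal = OldVal & Src2;           //   BinOpcode on the (rotated) value
      } while (!Mem.compare_exchange_weak(OldVal, NewVal));
                                          //   CS, then BRC back while it fails;
                                          //   OldVal picks up the observed value
      return OldVal;                      // Dest: the value before the update
    }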
@@ -5465,7 +5593,10 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary( BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); BuildMI(MBB, DL, TII->get(CSOpcode), Dest) - .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); + .addReg(OldVal) + .addReg(NewVal) + .add(Base) + .addImm(Disp); BuildMI(MBB, DL, TII->get(SystemZ::BRC)) .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); @@ -5533,8 +5664,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax( // %OrigVal = L Disp(%Base) // # fall through to LoopMMB MBB = StartMBB; - BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) - .addOperand(Base).addImm(Disp).addReg(0); + BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); MBB->addSuccessor(LoopMBB); // LoopMBB: @@ -5581,7 +5711,10 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax( BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); BuildMI(MBB, DL, TII->get(CSOpcode), Dest) - .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); + .addReg(OldVal) + .addReg(NewVal) + .add(Base) + .addImm(Disp); BuildMI(MBB, DL, TII->get(SystemZ::BRC)) .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); @@ -5642,7 +5775,9 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI, // # fall through to LoopMMB MBB = StartMBB; BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) - .addOperand(Base).addImm(Disp).addReg(0); + .add(Base) + .addImm(Disp) + .addReg(0); MBB->addSuccessor(LoopMBB); // LoopMBB: @@ -5696,7 +5831,10 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI, BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) - .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp); + .addReg(OldVal) + .addReg(StoreVal) + .add(Base) + .addImm(Disp); BuildMI(MBB, DL, TII->get(SystemZ::BRC)) .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); MBB->addSuccessor(LoopMBB); @@ -5706,14 +5844,12 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI, return DoneMBB; } -// Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true +// Emit an extension from a GR64 to a GR128. ClearEven is true // if the high register of the GR128 value must be cleared or false if -// it's "don't care". SubReg is subreg_l32 when extending a GR32 -// and subreg_l64 when extending a GR64. +// it's "don't care". 
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI, MachineBasicBlock *MBB, - bool ClearEven, - unsigned SubReg) const { + bool ClearEven) const { MachineFunction &MF = *MBB->getParent(); const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo()); @@ -5736,7 +5872,7 @@ MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI, In128 = NewIn128; } BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) - .addReg(In128).addReg(Src).addImm(SubReg); + .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64); MI.eraseFromParent(); return MBB; @@ -5869,7 +6005,7 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper( if (!isUInt<12>(DestDisp)) { unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) - .addOperand(DestBase) + .add(DestBase) .addImm(DestDisp) .addReg(0); DestBase = MachineOperand::CreateReg(Reg, false); @@ -5878,15 +6014,19 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper( if (!isUInt<12>(SrcDisp)) { unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg) - .addOperand(SrcBase) + .add(SrcBase) .addImm(SrcDisp) .addReg(0); SrcBase = MachineOperand::CreateReg(Reg, false); SrcDisp = 0; } BuildMI(*MBB, MI, DL, TII->get(Opcode)) - .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength) - .addOperand(SrcBase).addImm(SrcDisp); + .add(DestBase) + .addImm(DestDisp) + .addImm(ThisLength) + .add(SrcBase) + .addImm(SrcDisp) + ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); DestDisp += ThisLength; SrcDisp += ThisLength; Length -= ThisLength; @@ -6057,6 +6197,7 @@ MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter( case SystemZ::SelectF32: case SystemZ::SelectF64: case SystemZ::SelectF128: + case SystemZ::SelectVR128: return emitSelect(MI, MBB, 0); case SystemZ::CondStore8Mux: @@ -6096,12 +6237,10 @@ MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter( case SystemZ::CondStoreF64Inv: return emitCondStore(MI, MBB, SystemZ::STD, 0, true); - case SystemZ::AEXT128_64: - return emitExt128(MI, MBB, false, SystemZ::subreg_l64); - case SystemZ::ZEXT128_32: - return emitExt128(MI, MBB, true, SystemZ::subreg_l32); - case SystemZ::ZEXT128_64: - return emitExt128(MI, MBB, true, SystemZ::subreg_l64); + case SystemZ::AEXT128: + return emitExt128(MI, MBB, false); + case SystemZ::ZEXT128: + return emitExt128(MI, MBB, true); case SystemZ::ATOMIC_SWAPW: return emitAtomicLoadBinary(MI, MBB, 0, 0); @@ -6310,3 +6449,12 @@ MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter( llvm_unreachable("Unexpected instr type to insert"); } } + +// This is only used by the isel schedulers, and is needed only to prevent +// compiler from crashing when list-ilp is used. +const TargetRegisterClass * +SystemZTargetLowering::getRepRegClassFor(MVT VT) const { + if (VT == MVT::Untyped) + return &SystemZ::ADDR128BitRegClass; + return TargetLowering::getRepRegClassFor(VT); +} diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h index 7a21a47..abe8b72 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h +++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h @@ -86,14 +86,12 @@ enum NodeType : unsigned { // Count number of bits set in operand 0 per byte. POPCNT, - // Wrappers around the ISD opcodes of the same name. 
The output and - // first input operands are GR128s. The trailing numbers are the - // widths of the second operand in bits. - UMUL_LOHI64, - SDIVREM32, - SDIVREM64, - UDIVREM32, - UDIVREM64, + // Wrappers around the ISD opcodes of the same name. The output is GR128. + // Input operands may be GR64 or GR32, depending on the instruction. + SMUL_LOHI, + UMUL_LOHI, + SDIVREM, + UDIVREM, // Use a series of MVCs to copy bytes from one memory location to another. // The operands are: @@ -139,9 +137,6 @@ enum NodeType : unsigned { // Store the CC value in bits 29 and 28 of an integer. IPM, - // Perform a serialization operation. (BCR 15,0 or BCR 14,0.) - SERIALIZE, - // Compiler barrier only; generate a no-op. MEMBARRIER, @@ -454,7 +449,7 @@ public: MachineBasicBlock *BB) const override; SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; bool allowTruncateForTailCall(Type *, Type *) const override; - bool mayBeEmittedAsTailCall(CallInst *CI) const override; + bool mayBeEmittedAsTailCall(const CallInst *CI) const override; SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl<ISD::InputArg> &Ins, @@ -471,8 +466,6 @@ public: const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override; - SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, - SelectionDAG &DAG) const override; SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override; ISD::NodeType getExtendForAtomicOps() const override { @@ -487,6 +480,12 @@ private: const SystemZSubtarget &Subtarget; // Implement LowerOperation for individual opcodes. + SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, + const SDLoc &DL, EVT VT, + SDValue CmpOp0, SDValue CmpOp1) const; + SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, + EVT VT, ISD::CondCode CC, + SDValue CmpOp0, SDValue CmpOp1) const; SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const; SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const; SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; @@ -522,7 +521,6 @@ private: unsigned Opcode) const; SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const; SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const; - SDValue lowerLOAD_SEQUENCE_POINT(SDValue Op, SelectionDAG &DAG) const; SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const; SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const; SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const; @@ -537,6 +535,7 @@ private: unsigned UnpackHigh) const; SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const; + bool canTreatAsByteVector(EVT VT) const; SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp, unsigned Index, DAGCombinerInfo &DCI, bool Force) const; @@ -567,7 +566,7 @@ private: unsigned StoreOpcode, unsigned STOCOpcode, bool Invert) const; MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB, - bool ClearEven, unsigned SubReg) const; + bool ClearEven) const; MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI, MachineBasicBlock *BB, unsigned BinOpcode, unsigned BitSize, @@ -589,6 +588,8 @@ private: MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const; + + const TargetRegisterClass *getRepRegClassFor(MVT VT) const override; }; } // end namespace llvm diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrDFP.td 
b/contrib/llvm/lib/Target/SystemZ/SystemZInstrDFP.td new file mode 100644 index 0000000..08ab2d7 --- /dev/null +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrDFP.td @@ -0,0 +1,231 @@ +//==- SystemZInstrDFP.td - Floating-point SystemZ instructions -*- tblgen-*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The instructions in this file implement SystemZ decimal floating-point +// arithmetic. These instructions are not currently used for code generation, +// are provided for use with the assembler and disassembler only. If LLVM +// ever supports decimal floating-point types (_Decimal64 etc.), they can +// also be used for code generation for those types. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Move instructions +//===----------------------------------------------------------------------===// + +// Load and test. +let Defs = [CC] in { + def LTDTR : UnaryRRE<"ltdtr", 0xB3D6, null_frag, FP64, FP64>; + def LTXTR : UnaryRRE<"ltxtr", 0xB3DE, null_frag, FP128, FP128>; +} + + +//===----------------------------------------------------------------------===// +// Conversion instructions +//===----------------------------------------------------------------------===// + +// Convert floating-point values to narrower representations. The destination +// of LDXTR is a 128-bit value, but only the first register of the pair is used. +def LEDTR : TernaryRRFe<"ledtr", 0xB3D5, FP32, FP64>; +def LDXTR : TernaryRRFe<"ldxtr", 0xB3DD, FP128, FP128>; + +// Extend floating-point values to wider representations. +def LDETR : BinaryRRFd<"ldetr", 0xB3D4, FP64, FP32>; +def LXDTR : BinaryRRFd<"lxdtr", 0xB3DC, FP128, FP64>; + +// Convert a signed integer value to a floating-point one. +def CDGTR : UnaryRRE<"cdgtr", 0xB3F1, null_frag, FP64, GR64>; +def CXGTR : UnaryRRE<"cxgtr", 0xB3F9, null_frag, FP128, GR64>; +let Predicates = [FeatureFPExtension] in { + def CDGTRA : TernaryRRFe<"cdgtra", 0xB3F1, FP64, GR64>; + def CXGTRA : TernaryRRFe<"cxgtra", 0xB3F9, FP128, GR64>; + def CDFTR : TernaryRRFe<"cdftr", 0xB951, FP64, GR32>; + def CXFTR : TernaryRRFe<"cxftr", 0xB959, FP128, GR32>; +} + +// Convert an unsigned integer value to a floating-point one. +let Predicates = [FeatureFPExtension] in { + def CDLGTR : TernaryRRFe<"cdlgtr", 0xB952, FP64, GR64>; + def CXLGTR : TernaryRRFe<"cxlgtr", 0xB95A, FP128, GR64>; + def CDLFTR : TernaryRRFe<"cdlftr", 0xB953, FP64, GR32>; + def CXLFTR : TernaryRRFe<"cxlftr", 0xB95B, FP128, GR32>; +} + +// Convert a floating-point value to a signed integer value. +let Defs = [CC] in { + def CGDTR : BinaryRRFe<"cgdtr", 0xB3E1, GR64, FP64>; + def CGXTR : BinaryRRFe<"cgxtr", 0xB3E9, GR64, FP128>; + let Predicates = [FeatureFPExtension] in { + def CGDTRA : TernaryRRFe<"cgdtra", 0xB3E1, GR64, FP64>; + def CGXTRA : TernaryRRFe<"cgxtra", 0xB3E9, GR64, FP128>; + def CFDTR : TernaryRRFe<"cfdtr", 0xB941, GR32, FP64>; + def CFXTR : TernaryRRFe<"cfxtr", 0xB949, GR32, FP128>; + } +} + +// Convert a floating-point value to an unsigned integer value.
+let Defs = [CC] in { + let Predicates = [FeatureFPExtension] in { + def CLGDTR : TernaryRRFe<"clgdtr", 0xB942, GR64, FP64>; + def CLGXTR : TernaryRRFe<"clgxtr", 0xB94A, GR64, FP128>; + def CLFDTR : TernaryRRFe<"clfdtr", 0xB943, GR32, FP64>; + def CLFXTR : TernaryRRFe<"clfxtr", 0xB94B, GR32, FP128>; + } +} + +// Convert a packed value to a floating-point one. +def CDSTR : UnaryRRE<"cdstr", 0xB3F3, null_frag, FP64, GR64>; +def CXSTR : UnaryRRE<"cxstr", 0xB3FB, null_frag, FP128, GR128>; +def CDUTR : UnaryRRE<"cdutr", 0xB3F2, null_frag, FP64, GR64>; +def CXUTR : UnaryRRE<"cxutr", 0xB3FA, null_frag, FP128, GR128>; + +// Convert a floating-point value to a packed value. +def CSDTR : BinaryRRFd<"csdtr", 0xB3E3, GR64, FP64>; +def CSXTR : BinaryRRFd<"csxtr", 0xB3EB, GR128, FP128>; +def CUDTR : UnaryRRE<"cudtr", 0xB3E2, null_frag, GR64, FP64>; +def CUXTR : UnaryRRE<"cuxtr", 0xB3EA, null_frag, GR128, FP128>; + +// Convert from/to memory values in the zoned format. +let Predicates = [FeatureDFPZonedConversion] in { + def CDZT : BinaryRSL<"cdzt", 0xEDAA, FP64>; + def CXZT : BinaryRSL<"cxzt", 0xEDAB, FP128>; + def CZDT : StoreBinaryRSL<"czdt", 0xEDA8, FP64>; + def CZXT : StoreBinaryRSL<"czxt", 0xEDA9, FP128>; +} + +// Convert from/to memory values in the packed format. +let Predicates = [FeatureDFPPackedConversion] in { + def CDPT : BinaryRSL<"cdpt", 0xEDAE, FP64>; + def CXPT : BinaryRSL<"cxpt", 0xEDAF, FP128>; + def CPDT : StoreBinaryRSL<"cpdt", 0xEDAC, FP64>; + def CPXT : StoreBinaryRSL<"cpxt", 0xEDAD, FP128>; +} + +// Perform floating-point operation. +let Defs = [CC, R1L, F0Q], Uses = [R0L, F4Q] in + def PFPO : SideEffectInherentE<"pfpo", 0x010A>; + + +//===----------------------------------------------------------------------===// +// Unary arithmetic +//===----------------------------------------------------------------------===// + +// Round to an integer, with the second operand (M3) specifying the rounding +// mode. M4 can be set to 4 to suppress detection of inexact conditions. +def FIDTR : TernaryRRFe<"fidtr", 0xB3D7, FP64, FP64>; +def FIXTR : TernaryRRFe<"fixtr", 0xB3DF, FP128, FP128>; + +// Extract biased exponent. +def EEDTR : UnaryRRE<"eedtr", 0xB3E5, null_frag, FP64, FP64>; +def EEXTR : UnaryRRE<"eextr", 0xB3ED, null_frag, FP128, FP128>; + +// Extract significance. +def ESDTR : UnaryRRE<"esdtr", 0xB3E7, null_frag, FP64, FP64>; +def ESXTR : UnaryRRE<"esxtr", 0xB3EF, null_frag, FP128, FP128>; + + +//===----------------------------------------------------------------------===// +// Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition. +let Defs = [CC] in { + let isCommutable = 1 in { + def ADTR : BinaryRRFa<"adtr", 0xB3D2, null_frag, FP64, FP64, FP64>; + def AXTR : BinaryRRFa<"axtr", 0xB3DA, null_frag, FP128, FP128, FP128>; + } + let Predicates = [FeatureFPExtension] in { + def ADTRA : TernaryRRFa<"adtra", 0xB3D2, FP64, FP64, FP64>; + def AXTRA : TernaryRRFa<"axtra", 0xB3DA, FP128, FP128, FP128>; + } +} + +// Subtraction. +let Defs = [CC] in { + def SDTR : BinaryRRFa<"sdtr", 0xB3D3, null_frag, FP64, FP64, FP64>; + def SXTR : BinaryRRFa<"sxtr", 0xB3DB, null_frag, FP128, FP128, FP128>; + let Predicates = [FeatureFPExtension] in { + def SDTRA : TernaryRRFa<"sdtra", 0xB3D3, FP64, FP64, FP64>; + def SXTRA : TernaryRRFa<"sxtra", 0xB3DB, FP128, FP128, FP128>; + } +} + +// Multiplication. 
+let isCommutable = 1 in { + def MDTR : BinaryRRFa<"mdtr", 0xB3D0, null_frag, FP64, FP64, FP64>; + def MXTR : BinaryRRFa<"mxtr", 0xB3D8, null_frag, FP128, FP128, FP128>; +} +let Predicates = [FeatureFPExtension] in { + def MDTRA : TernaryRRFa<"mdtra", 0xB3D0, FP64, FP64, FP64>; + def MXTRA : TernaryRRFa<"mxtra", 0xB3D8, FP128, FP128, FP128>; +} + +// Division. +def DDTR : BinaryRRFa<"ddtr", 0xB3D1, null_frag, FP64, FP64, FP64>; +def DXTR : BinaryRRFa<"dxtr", 0xB3D9, null_frag, FP128, FP128, FP128>; +let Predicates = [FeatureFPExtension] in { + def DDTRA : TernaryRRFa<"ddtra", 0xB3D1, FP64, FP64, FP64>; + def DXTRA : TernaryRRFa<"dxtra", 0xB3D9, FP128, FP128, FP128>; +} + +// Quantize. +def QADTR : TernaryRRFb<"qadtr", 0xB3F5, FP64, FP64, FP64>; +def QAXTR : TernaryRRFb<"qaxtr", 0xB3FD, FP128, FP128, FP128>; + +// Reround. +def RRDTR : TernaryRRFb<"rrdtr", 0xB3F7, FP64, FP64, FP64>; +def RRXTR : TernaryRRFb<"rrxtr", 0xB3FF, FP128, FP128, FP128>; + +// Shift significand left/right. +def SLDT : BinaryRXF<"sldt", 0xED40, null_frag, FP64, FP64, null_frag, 0>; +def SLXT : BinaryRXF<"slxt", 0xED48, null_frag, FP128, FP128, null_frag, 0>; +def SRDT : BinaryRXF<"srdt", 0xED41, null_frag, FP64, FP64, null_frag, 0>; +def SRXT : BinaryRXF<"srxt", 0xED49, null_frag, FP128, FP128, null_frag, 0>; + +// Insert biased exponent. +def IEDTR : BinaryRRFb<"iedtr", 0xB3F6, null_frag, FP64, FP64, FP64>; +def IEXTR : BinaryRRFb<"iextr", 0xB3FE, null_frag, FP128, FP128, FP128>; + + +//===----------------------------------------------------------------------===// +// Comparisons +//===----------------------------------------------------------------------===// + +// Compare. +let Defs = [CC] in { + def CDTR : CompareRRE<"cdtr", 0xB3E4, null_frag, FP64, FP64>; + def CXTR : CompareRRE<"cxtr", 0xB3EC, null_frag, FP128, FP128>; +} + +// Compare and signal. +let Defs = [CC] in { + def KDTR : CompareRRE<"kdtr", 0xB3E0, null_frag, FP64, FP64>; + def KXTR : CompareRRE<"kxtr", 0xB3E8, null_frag, FP128, FP128>; +} + +// Compare biased exponent. +let Defs = [CC] in { + def CEDTR : CompareRRE<"cedtr", 0xB3F4, null_frag, FP64, FP64>; + def CEXTR : CompareRRE<"cextr", 0xB3FC, null_frag, FP128, FP128>; +} + +// Test Data Class. +let Defs = [CC] in { + def TDCET : TestRXE<"tdcet", 0xED50, null_frag, FP32>; + def TDCDT : TestRXE<"tdcdt", 0xED54, null_frag, FP64>; + def TDCXT : TestRXE<"tdcxt", 0xED58, null_frag, FP128>; +} + +// Test Data Group. +let Defs = [CC] in { + def TDGET : TestRXE<"tdget", 0xED51, null_frag, FP32>; + def TDGDT : TestRXE<"tdgdt", 0xED55, null_frag, FP64>; + def TDGXT : TestRXE<"tdgxt", 0xED59, null_frag, FP128>; +} + diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td index bb6d27e..02aeaad 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td @@ -12,9 +12,12 @@ //===----------------------------------------------------------------------===// // C's ?: operator for floating-point operands. 
-def SelectF32 : SelectWrapper<FP32>; -def SelectF64 : SelectWrapper<FP64>; -def SelectF128 : SelectWrapper<FP128>; +def SelectF32 : SelectWrapper<f32, FP32>; +def SelectF64 : SelectWrapper<f64, FP64>; +let Predicates = [FeatureNoVectorEnhancements1] in + def SelectF128 : SelectWrapper<f128, FP128>; +let Predicates = [FeatureVectorEnhancements1] in + def SelectVR128 : SelectWrapper<f128, VR128>; defm CondStoreF32 : CondStores<FP32, nonvolatile_store, nonvolatile_load, bdxaddr20only>; @@ -69,8 +72,9 @@ let Defs = [CC], usesCustomInserter = 1 in { let Predicates = [FeatureVector] in { defm : CompareZeroFP<LTEBRCompare_VecPseudo, FP32>; defm : CompareZeroFP<LTDBRCompare_VecPseudo, FP64>; - defm : CompareZeroFP<LTXBRCompare_VecPseudo, FP128>; } +let Predicates = [FeatureVector, FeatureNoVectorEnhancements1] in + defm : CompareZeroFP<LTXBRCompare_VecPseudo, FP128>; // Moves between 64-bit integer and floating-point registers. def LGDR : UnaryRRE<"lgdr", 0xB3CD, bitconvert, GR64, FP64>; @@ -83,8 +87,12 @@ let isCodeGenOnly = 1 in { } // The sign of an FP128 is in the high register. -def : Pat<(fcopysign FP32:$src1, FP128:$src2), - (CPSDRsd FP32:$src1, (EXTRACT_SUBREG FP128:$src2, subreg_h64))>; +let Predicates = [FeatureNoVectorEnhancements1] in + def : Pat<(fcopysign FP32:$src1, (f32 (fpround (f128 FP128:$src2)))), + (CPSDRsd FP32:$src1, (EXTRACT_SUBREG FP128:$src2, subreg_h64))>; +let Predicates = [FeatureVectorEnhancements1] in + def : Pat<(fcopysign FP32:$src1, (f32 (fpround (f128 VR128:$src2)))), + (CPSDRsd FP32:$src1, (EXTRACT_SUBREG VR128:$src2, subreg_r64))>; // fcopysign with an FP64 result. let isCodeGenOnly = 1 in @@ -92,8 +100,12 @@ let isCodeGenOnly = 1 in def CPSDRdd : BinaryRRFb<"cpsdr", 0xB372, fcopysign, FP64, FP64, FP64>; // The sign of an FP128 is in the high register. -def : Pat<(fcopysign FP64:$src1, FP128:$src2), - (CPSDRdd FP64:$src1, (EXTRACT_SUBREG FP128:$src2, subreg_h64))>; +let Predicates = [FeatureNoVectorEnhancements1] in + def : Pat<(fcopysign FP64:$src1, (f64 (fpround (f128 FP128:$src2)))), + (CPSDRdd FP64:$src1, (EXTRACT_SUBREG FP128:$src2, subreg_h64))>; +let Predicates = [FeatureVectorEnhancements1] in + def : Pat<(fcopysign FP64:$src1, (f64 (fpround (f128 VR128:$src2)))), + (CPSDRdd FP64:$src1, (EXTRACT_SUBREG VR128:$src2, subreg_r64))>; // fcopysign with an FP128 result. Use "upper" as the high half and leave // the low half as-is. 
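The comment above notes that the sign of an FP128 lives in the high register, which is why these patterns only need CPSDR on the high 64-bit half (subreg_h64, or subreg_r64 for VR128). A plain C++ illustration of that bit-level fact, assuming the sign bit is the most significant bit of the high half; this is a sketch, not target code:

    // Copying only the sign bit of the high half reproduces fcopysign for a
    // 128-bit value held as a big-endian pair of 64-bit halves.
    #include <cstdint>
    #include <cstdio>

    struct F128Bits { uint64_t Hi, Lo; }; // high half first, as in the FP128 pair

    F128Bits copySign(F128Bits Mag, F128Bits Sgn) {
      const uint64_t SignBit = 1ULL << 63;
      Mag.Hi = (Mag.Hi & ~SignBit) | (Sgn.Hi & SignBit); // low half untouched
      return Mag;
    }

    int main() {
      F128Bits A{0x3fff000000000000ULL, 0}, B{0x8000000000000000ULL, 0};
      std::printf("%016llx\n", (unsigned long long)copySign(A, B).Hi); // bfff...
      return 0;
    }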
@@ -101,12 +113,14 @@ class CopySign128<RegisterOperand cls, dag upper> : Pat<(fcopysign FP128:$src1, cls:$src2), (INSERT_SUBREG FP128:$src1, upper, subreg_h64)>; -def : CopySign128<FP32, (CPSDRds (EXTRACT_SUBREG FP128:$src1, subreg_h64), - FP32:$src2)>; -def : CopySign128<FP64, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_h64), - FP64:$src2)>; -def : CopySign128<FP128, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_h64), - (EXTRACT_SUBREG FP128:$src2, subreg_h64))>; +let Predicates = [FeatureNoVectorEnhancements1] in { + def : CopySign128<FP32, (CPSDRds (EXTRACT_SUBREG FP128:$src1, subreg_h64), + FP32:$src2)>; + def : CopySign128<FP64, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_h64), + FP64:$src2)>; + def : CopySign128<FP128, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_h64), + (EXTRACT_SUBREG FP128:$src2, subreg_h64))>; +} defm LoadStoreF32 : MVCLoadStore<load, f32, MVCSequence, 4>; defm LoadStoreF64 : MVCLoadStore<load, f64, MVCSequence, 8>; @@ -121,7 +135,8 @@ let canFoldAsLoad = 1, SimpleBDXLoad = 1 in { defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64, 8>; // For z13 we prefer LDE over LE to avoid partial register dependencies. - def LDE32 : UnaryRXE<"lde", 0xED24, null_frag, FP32, 4>; + let isCodeGenOnly = 1 in + def LDE32 : UnaryRXE<"lde", 0xED24, null_frag, FP32, 4>; // These instructions are split after register allocation, so we don't // want a custom inserter. @@ -165,20 +180,32 @@ def LEXBRA : TernaryRRFe<"lexbra", 0xB346, FP128, FP128>, def LDXBRA : TernaryRRFe<"ldxbra", 0xB345, FP128, FP128>, Requires<[FeatureFPExtension]>; -def : Pat<(f32 (fpround FP128:$src)), - (EXTRACT_SUBREG (LEXBR FP128:$src), subreg_hr32)>; -def : Pat<(f64 (fpround FP128:$src)), - (EXTRACT_SUBREG (LDXBR FP128:$src), subreg_h64)>; +let Predicates = [FeatureNoVectorEnhancements1] in { + def : Pat<(f32 (fpround FP128:$src)), + (EXTRACT_SUBREG (LEXBR FP128:$src), subreg_hr32)>; + def : Pat<(f64 (fpround FP128:$src)), + (EXTRACT_SUBREG (LDXBR FP128:$src), subreg_h64)>; +} // Extend register floating-point values to wider representations. -def LDEBR : UnaryRRE<"ldebr", 0xB304, fpextend, FP64, FP32>; -def LXEBR : UnaryRRE<"lxebr", 0xB306, fpextend, FP128, FP32>; -def LXDBR : UnaryRRE<"lxdbr", 0xB305, fpextend, FP128, FP64>; +def LDEBR : UnaryRRE<"ldebr", 0xB304, fpextend, FP64, FP32>; +def LXEBR : UnaryRRE<"lxebr", 0xB306, null_frag, FP128, FP32>; +def LXDBR : UnaryRRE<"lxdbr", 0xB305, null_frag, FP128, FP64>; +let Predicates = [FeatureNoVectorEnhancements1] in { + def : Pat<(f128 (fpextend (f32 FP32:$src))), (LXEBR FP32:$src)>; + def : Pat<(f128 (fpextend (f64 FP64:$src))), (LXDBR FP64:$src)>; +} // Extend memory floating-point values to wider representations. def LDEB : UnaryRXE<"ldeb", 0xED04, extloadf32, FP64, 4>; -def LXEB : UnaryRXE<"lxeb", 0xED06, extloadf32, FP128, 4>; -def LXDB : UnaryRXE<"lxdb", 0xED05, extloadf64, FP128, 8>; +def LXEB : UnaryRXE<"lxeb", 0xED06, null_frag, FP128, 4>; +def LXDB : UnaryRXE<"lxdb", 0xED05, null_frag, FP128, 8>; +let Predicates = [FeatureNoVectorEnhancements1] in { + def : Pat<(f128 (extloadf32 bdxaddr12only:$src)), + (LXEB bdxaddr12only:$src)>; + def : Pat<(f128 (extloadf64 bdxaddr12only:$src)), + (LXDB bdxaddr12only:$src)>; +} // Convert a signed integer register value to a floating-point one. def CEFBR : UnaryRRE<"cefbr", 0xB394, sint_to_fp, FP32, GR32>; @@ -425,30 +452,32 @@ def : Pat<(fmul (f64 (fpextend FP32:$src1)), // f128 multiplication of two FP64 registers. 
def MXDBR : BinaryRRE<"mxdbr", 0xB307, null_frag, FP128, FP64>; -def : Pat<(fmul (f128 (fpextend FP64:$src1)), (f128 (fpextend FP64:$src2))), - (MXDBR (INSERT_SUBREG (f128 (IMPLICIT_DEF)), - FP64:$src1, subreg_h64), FP64:$src2)>; +let Predicates = [FeatureNoVectorEnhancements1] in + def : Pat<(fmul (f128 (fpextend FP64:$src1)), (f128 (fpextend FP64:$src2))), + (MXDBR (INSERT_SUBREG (f128 (IMPLICIT_DEF)), + FP64:$src1, subreg_h64), FP64:$src2)>; // f128 multiplication of an FP64 register and an f64 memory. def MXDB : BinaryRXE<"mxdb", 0xED07, null_frag, FP128, load, 8>; -def : Pat<(fmul (f128 (fpextend FP64:$src1)), - (f128 (extloadf64 bdxaddr12only:$addr))), - (MXDB (INSERT_SUBREG (f128 (IMPLICIT_DEF)), FP64:$src1, subreg_h64), - bdxaddr12only:$addr)>; +let Predicates = [FeatureNoVectorEnhancements1] in + def : Pat<(fmul (f128 (fpextend FP64:$src1)), + (f128 (extloadf64 bdxaddr12only:$addr))), + (MXDB (INSERT_SUBREG (f128 (IMPLICIT_DEF)), FP64:$src1, subreg_h64), + bdxaddr12only:$addr)>; // Fused multiply-add. -def MAEBR : TernaryRRD<"maebr", 0xB30E, z_fma, FP32>; -def MADBR : TernaryRRD<"madbr", 0xB31E, z_fma, FP64>; +def MAEBR : TernaryRRD<"maebr", 0xB30E, z_fma, FP32, FP32>; +def MADBR : TernaryRRD<"madbr", 0xB31E, z_fma, FP64, FP64>; -def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, load, 4>; -def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, load, 8>; +def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, FP32, load, 4>; +def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, FP64, load, 8>; // Fused multiply-subtract. -def MSEBR : TernaryRRD<"msebr", 0xB30F, z_fms, FP32>; -def MSDBR : TernaryRRD<"msdbr", 0xB31F, z_fms, FP64>; +def MSEBR : TernaryRRD<"msebr", 0xB30F, z_fms, FP32, FP32>; +def MSDBR : TernaryRRD<"msdbr", 0xB31F, z_fms, FP64, FP64>; -def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, load, 4>; -def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, load, 8>; +def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, FP32, load, 4>; +def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, FP64, load, 8>; // Division. def DEBR : BinaryRRE<"debr", 0xB30D, fdiv, FP32, FP32>; @@ -458,6 +487,12 @@ def DXBR : BinaryRRE<"dxbr", 0xB34D, fdiv, FP128, FP128>; def DEB : BinaryRXE<"deb", 0xED0D, fdiv, FP32, load, 4>; def DDB : BinaryRXE<"ddb", 0xED1D, fdiv, FP64, load, 8>; +// Divide to integer. +let Defs = [CC] in { + def DIEBR : TernaryRRFb<"diebr", 0xB353, FP32, FP32, FP32>; + def DIDBR : TernaryRRFb<"didbr", 0xB35B, FP64, FP64, FP64>; +} + //===----------------------------------------------------------------------===// // Comparisons //===----------------------------------------------------------------------===// @@ -469,6 +504,13 @@ let Defs = [CC], CCValues = 0xF in { def CEB : CompareRXE<"ceb", 0xED09, z_fcmp, FP32, load, 4>; def CDB : CompareRXE<"cdb", 0xED19, z_fcmp, FP64, load, 8>; + + def KEBR : CompareRRE<"kebr", 0xB308, null_frag, FP32, FP32>; + def KDBR : CompareRRE<"kdbr", 0xB318, null_frag, FP64, FP64>; + def KXBR : CompareRRE<"kxbr", 0xB348, null_frag, FP128, FP128>; + + def KEB : CompareRXE<"keb", 0xED08, null_frag, FP32, load, 4>; + def KDB : CompareRXE<"kdb", 0xED18, null_frag, FP64, load, 8>; } // Test Data Class. 
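As a side note on the fused multiply-add definitions earlier in this file's diff (MAEBR/MADBR and the memory forms), the multiply-and-add instructions accumulate into the first operand with a single rounding, which is what lets them implement LLVM's fma node. A scalar C++ analogue of that contract, for illustration only:

    // Scalar analogue of the multiply-and-add contract behind the z_fma
    // patterns: one rounding for the whole product-plus-accumulator.
    #include <cmath>
    #include <cstdio>

    double multiplyAndAdd(double Acc, double A, double B) {
      return std::fma(A, B, Acc); // result replaces the accumulator operand
    }

    int main() {
      std::printf("%g\n", multiplyAndAdd(1.0, 2.0, 3.0)); // prints 7
      return 0;
    }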
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td index c727f48..033a0a8 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td @@ -527,6 +527,22 @@ class InstRRFc<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> let Inst{3-0} = R2; } +class InstRRFd<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<4, outs, ins, asmstr, pattern> { + field bits<32> Inst; + field bits<32> SoftFail = 0; + + bits<4> R1; + bits<4> R2; + bits<4> M4; + + let Inst{31-16} = op; + let Inst{15-12} = 0; + let Inst{11-8} = M4; + let Inst{7-4} = R1; + let Inst{3-0} = R2; +} + class InstRRFe<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> : InstSystemZ<4, outs, ins, asmstr, pattern> { field bits<32> Inst; @@ -710,6 +726,37 @@ class InstRSI<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern> let Inst{15-0} = RI2; } +class InstRSLa<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<20> BDL1; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = BDL1{19-16}; + let Inst{35-32} = 0; + let Inst{31-16} = BDL1{15-0}; + let Inst{15-8} = 0; + let Inst{7-0} = op{7-0}; +} + +class InstRSLb<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<4> R1; + bits<24> BDL2; + bits<4> M3; + + let Inst{47-40} = op{15-8}; + let Inst{39-16} = BDL2; + let Inst{15-12} = R1; + let Inst{11-8} = M3; + let Inst{7-0} = op{7-0}; +} + class InstRSYa<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> : InstSystemZ<6, outs, ins, asmstr, pattern> { field bits<48> Inst; @@ -817,6 +864,37 @@ class InstSSa<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern> let Inst{15-0} = BD2; } +class InstSSb<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<20> BDL1; + bits<20> BDL2; + + let Inst{47-40} = op; + let Inst{39-36} = BDL1{19-16}; + let Inst{35-32} = BDL2{19-16}; + let Inst{31-16} = BDL1{15-0}; + let Inst{15-0} = BDL2{15-0}; +} + +class InstSSc<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<20> BDL1; + bits<16> BD2; + bits<4> I3; + + let Inst{47-40} = op; + let Inst{39-36} = BDL1{19-16}; + let Inst{35-32} = I3; + let Inst{31-16} = BDL1{15-0}; + let Inst{15-0} = BD2; +} + class InstSSd<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern> : InstSystemZ<6, outs, ins, asmstr, pattern> { field bits<48> Inst; @@ -850,6 +928,20 @@ class InstSSe<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern> let Inst{15-0} = BD4; } +class InstSSf<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<16> BD1; + bits<24> BDL2; + + let Inst{47-40} = op; + let Inst{39-32} = BDL2{23-16}; + let Inst{31-16} = BD1; + let Inst{15-0} = BDL2{15-0}; +} + class InstSSE<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> : InstSystemZ<6, outs, ins, asmstr, pattern> { field bits<48> 
Inst; @@ -999,6 +1091,94 @@ class InstVRIe<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> let Inst{7-0} = op{7-0}; } +class InstVRIf<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<5> V1; + bits<5> V2; + bits<5> V3; + bits<8> I4; + bits<4> M5; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = V1{3-0}; + let Inst{35-32} = V2{3-0}; + let Inst{31-28} = V3{3-0}; + let Inst{27-24} = 0; + let Inst{23-20} = M5; + let Inst{19-12} = I4; + let Inst{11} = V1{4}; + let Inst{10} = V2{4}; + let Inst{9} = V3{4}; + let Inst{8} = 0; + let Inst{7-0} = op{7-0}; +} + +class InstVRIg<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<5> V1; + bits<5> V2; + bits<8> I3; + bits<8> I4; + bits<4> M5; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = V1{3-0}; + let Inst{35-32} = V2{3-0}; + let Inst{31-24} = I4; + let Inst{23-20} = M5; + let Inst{19-12} = I3; + let Inst{11} = V1{4}; + let Inst{10} = V2{4}; + let Inst{9-8} = 0; + let Inst{7-0} = op{7-0}; +} + +class InstVRIh<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<5> V1; + bits<16> I2; + bits<4> I3; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = V1{3-0}; + let Inst{35-32} = 0; + let Inst{31-16} = I2; + let Inst{15-12} = I3; + let Inst{11} = V1{4}; + let Inst{10-8} = 0; + let Inst{7-0} = op{7-0}; +} + +class InstVRIi<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<5> V1; + bits<4> R2; + bits<8> I3; + bits<4> M4; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = V1{3-0}; + let Inst{35-32} = R2; + let Inst{31-24} = 0; + let Inst{23-20} = M4; + let Inst{19-12} = I3; + let Inst{11} = V1{4}; + let Inst{10-8} = 0; + let Inst{7-0} = op{7-0}; +} + // Depending on the instruction mnemonic, certain bits may be or-ed into // the M4 value provided as explicit operand. These are passed as m4or. 
class InstVRRa<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern, @@ -1167,6 +1347,67 @@ class InstVRRf<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> let Inst{7-0} = op{7-0}; } +class InstVRRg<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<5> V1; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = 0; + let Inst{35-32} = V1{3-0}; + let Inst{31-12} = 0; + let Inst{11} = 0; + let Inst{10} = V1{4}; + let Inst{9-8} = 0; + let Inst{7-0} = op{7-0}; +} + +class InstVRRh<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<5> V1; + bits<5> V2; + bits<4> M3; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = 0; + let Inst{35-32} = V1{3-0}; + let Inst{31-28} = V2{3-0}; + let Inst{27-24} = 0; + let Inst{23-20} = M3; + let Inst{19-12} = 0; + let Inst{11} = 0; + let Inst{10} = V1{4}; + let Inst{9} = V2{4}; + let Inst{8} = 0; + let Inst{7-0} = op{7-0}; +} + +class InstVRRi<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<4> R1; + bits<5> V2; + bits<4> M3; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = R1; + let Inst{35-32} = V2{3-0}; + let Inst{31-24} = 0; + let Inst{23-20} = M3; + let Inst{19-12} = 0; + let Inst{11} = 0; + let Inst{10} = V2{4}; + let Inst{9-8} = 0; + let Inst{7-0} = op{7-0}; +} + class InstVRSa<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> : InstSystemZ<6, outs, ins, asmstr, pattern> { field bits<48> Inst; @@ -1229,6 +1470,25 @@ class InstVRSc<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> let Inst{7-0} = op{7-0}; } +class InstVRSd<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<5> V1; + bits<16> BD2; + bits<4> R3; + + let Inst{47-40} = op{15-8}; + let Inst{39-36} = 0; + let Inst{35-32} = R3; + let Inst{31-16} = BD2; + let Inst{15-12} = V1{3-0}; + let Inst{11-9} = 0; + let Inst{8} = V1{4}; + let Inst{7-0} = op{7-0}; +} + class InstVRV<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> : InstSystemZ<6, outs, ins, asmstr, pattern> { field bits<48> Inst; @@ -1266,6 +1526,24 @@ class InstVRX<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> let Inst{7-0} = op{7-0}; } +class InstVSI<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern> + : InstSystemZ<6, outs, ins, asmstr, pattern> { + field bits<48> Inst; + field bits<48> SoftFail = 0; + + bits<5> V1; + bits<16> BD2; + bits<8> I3; + + let Inst{47-40} = op{15-8}; + let Inst{39-32} = I3; + let Inst{31-16} = BD2; + let Inst{15-12} = V1{3-0}; + let Inst{11-9} = 0; + let Inst{8} = V1{4}; + let Inst{7-0} = op{7-0}; +} + //===----------------------------------------------------------------------===// // Instruction classes for .insn directives //===----------------------------------------------------------------------===// @@ -1567,6 +1845,9 @@ class ICV<string name> // Inherent: // One register output operand and no input operands. // +// InherentDual: +// Two register output operands and no input operands. +// // StoreInherent: // One address operand. The instruction stores to the address. 
// @@ -1642,8 +1923,9 @@ class ICV<string name> // Two input operands and an implicit CC output operand. // // Test: -// Two input operands and an implicit CC output operand. The second -// input operand is an "address" operand used as a test class mask. +// One or two input operands and an implicit CC output operand. If +// present, the second input operand is an "address" operand used as +// a test class mask. // // Ternary: // One register output operand and three input operands. @@ -1691,6 +1973,10 @@ class InherentRRE<string mnemonic, bits<16> opcode, RegisterOperand cls, let R2 = 0; } +class InherentDualRRE<string mnemonic, bits<16> opcode, RegisterOperand cls> + : InstRRE<opcode, (outs cls:$R1, cls:$R2), (ins), + mnemonic#"\t$R1, $R2", []>; + class InherentVRIa<string mnemonic, bits<16> opcode, bits<16> value> : InstVRIa<opcode, (outs VR128:$V1), (ins), mnemonic#"\t$V1", []> { let I2 = value; @@ -1714,6 +2000,12 @@ class SideEffectInherentS<string mnemonic, bits<16> opcode, let BD2 = 0; } +class SideEffectInherentRRE<string mnemonic, bits<16> opcode> + : InstRRE<opcode, (outs), (ins), mnemonic, []> { + let R1 = 0; + let R2 = 0; +} + // Allow an optional TLS marker symbol to generate TLS call relocations. class CallRI<string mnemonic, bits<12> opcode> : InstRIb<opcode, (outs), (ins GR64:$R1, brtarget16tls:$RI2), @@ -1804,6 +2096,25 @@ class FixedCondBranchRX<CondVariant V, string mnemonic, bits<8> opcode> let M1 = V.ccmask; } +class CondBranchRXY<string mnemonic, bits<16> opcode> + : InstRXYb<opcode, (outs), (ins cond4:$valid, cond4:$M1, bdxaddr20only:$XBD2), + !subst("#", "${M1}", mnemonic)#"\t$XBD2", []> { + let CCMaskFirst = 1; +} + +class AsmCondBranchRXY<string mnemonic, bits<16> opcode> + : InstRXYb<opcode, (outs), (ins imm32zx4:$M1, bdxaddr20only:$XBD2), + mnemonic#"\t$M1, $XBD2", []>; + +class FixedCondBranchRXY<CondVariant V, string mnemonic, bits<16> opcode, + SDPatternOperator operator = null_frag> + : InstRXYb<opcode, (outs), (ins bdxaddr20only:$XBD2), + !subst("#", V.suffix, mnemonic)#"\t$XBD2", + [(operator (load bdxaddr20only:$XBD2))]> { + let isAsmParserOnly = V.alternate; + let M1 = V.ccmask; +} + class CmpBranchRIEa<string mnemonic, bits<16> opcode, RegisterOperand cls, Immediate imm> : InstRIEa<opcode, (outs), (ins cls:$R1, imm:$I2, cond4:$M3), @@ -2084,6 +2395,13 @@ multiclass LoadMultipleRSPair<string mnemonic, bits<8> rsOpcode, } } +class LoadMultipleSSe<string mnemonic, bits<8> opcode, RegisterOperand cls> + : InstSSe<opcode, (outs cls:$R1, cls:$R3), + (ins bdaddr12only:$BD2, bdaddr12only:$BD4), + mnemonic#"\t$R1, $R3, $BD2, $BD4", []> { + let mayLoad = 1; +} + class LoadMultipleVRSa<string mnemonic, bits<16> opcode> : InstVRSa<opcode, (outs VR128:$V1, VR128:$V3), (ins bdaddr12only:$BD2), mnemonic#"\t$V1, $V3, $BD2", []> { @@ -2159,6 +2477,24 @@ class StoreLengthVRSb<string mnemonic, bits<16> opcode, let AccessBytes = bytes; } +class StoreLengthVRSd<string mnemonic, bits<16> opcode, + SDPatternOperator operator, bits<5> bytes> + : InstVRSd<opcode, (outs), (ins VR128:$V1, GR32:$R3, bdaddr12only:$BD2), + mnemonic#"\t$V1, $R3, $BD2", + [(operator VR128:$V1, GR32:$R3, bdaddr12only:$BD2)]> { + let mayStore = 1; + let AccessBytes = bytes; +} + +class StoreLengthVSI<string mnemonic, bits<16> opcode, + SDPatternOperator operator, bits<5> bytes> + : InstVSI<opcode, (outs), (ins VR128:$V1, bdaddr12only:$BD2, imm32zx8:$I3), + mnemonic#"\t$V1, $BD2, $I3", + [(operator VR128:$V1, imm32zx8:$I3, bdaddr12only:$BD2)]> { + let mayStore = 1; + let AccessBytes = bytes; +} + class 
StoreMultipleRS<string mnemonic, bits<8> opcode, RegisterOperand cls, AddressingMode mode = bdaddr12only> : InstRSa<opcode, (outs), (ins cls:$R1, cls:$R3, mode:$BD2), @@ -2355,6 +2691,23 @@ class UnaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator, let OpType = "reg"; } +class UnaryTiedRRE<string mnemonic, bits<16> opcode, RegisterOperand cls> + : InstRRE<opcode, (outs cls:$R1), (ins cls:$R1src), + mnemonic#"\t$R1", []> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; + let R2 = 0; +} + +class UnaryMemRRFc<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRFc<opcode, (outs cls2:$R2, cls1:$R1), (ins cls1:$R1src), + mnemonic#"\t$R1, $R2", []> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; + let M3 = 0; +} + class UnaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator, RegisterOperand cls, Immediate imm> : InstRIa<opcode, (outs cls:$R1), (ins imm:$I2), @@ -2570,6 +2923,11 @@ class SideEffectBinaryRX<string mnemonic, bits<8> opcode, : InstRXa<opcode, (outs), (ins cls:$R1, bdxaddr12only:$XBD2), mnemonic##"\t$R1, $XBD2", []>; +class SideEffectBinaryRXY<string mnemonic, bits<16> opcode, + RegisterOperand cls> + : InstRXYa<opcode, (outs), (ins cls:$R1, bdxaddr20only:$XBD2), + mnemonic##"\t$R1, $XBD2", []>; + class SideEffectBinaryRILPC<string mnemonic, bits<12> opcode, RegisterOperand cls> : InstRILb<opcode, (outs), (ins cls:$R1, pcrel32:$RI2), @@ -2580,16 +2938,90 @@ class SideEffectBinaryRILPC<string mnemonic, bits<12> opcode, let AddedComplexity = 7; } +class SideEffectBinaryRRE<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRE<opcode, (outs), (ins cls1:$R1, cls2:$R2), + mnemonic#"\t$R1, $R2", []>; + +class SideEffectBinaryRRFa<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRFa<opcode, (outs), (ins cls1:$R1, cls2:$R2), + mnemonic#"\t$R1, $R2", []> { + let R3 = 0; + let M4 = 0; +} + +class SideEffectBinaryRRFc<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRFc<opcode, (outs), (ins cls1:$R1, cls2:$R2), + mnemonic#"\t$R1, $R2", []> { + let M3 = 0; +} + class SideEffectBinaryIE<string mnemonic, bits<16> opcode, Immediate imm1, Immediate imm2> : InstIE<opcode, (outs), (ins imm1:$I1, imm2:$I2), mnemonic#"\t$I1, $I2", []>; +class SideEffectBinarySI<string mnemonic, bits<8> opcode, Operand imm> + : InstSI<opcode, (outs), (ins bdaddr12only:$BD1, imm:$I2), + mnemonic#"\t$BD1, $I2", []>; + class SideEffectBinarySIL<string mnemonic, bits<16> opcode, SDPatternOperator operator, Immediate imm> : InstSIL<opcode, (outs), (ins bdaddr12only:$BD1, imm:$I2), mnemonic#"\t$BD1, $I2", [(operator bdaddr12only:$BD1, imm:$I2)]>; +class SideEffectBinarySSa<string mnemonic, bits<8> opcode> + : InstSSa<opcode, (outs), (ins bdladdr12onlylen8:$BDL1, bdaddr12only:$BD2), + mnemonic##"\t$BDL1, $BD2", []>; + +class SideEffectBinarySSb<string mnemonic, bits<8> opcode> + : InstSSb<opcode, + (outs), (ins bdladdr12onlylen4:$BDL1, bdladdr12onlylen4:$BDL2), + mnemonic##"\t$BDL1, $BDL2", []>; + +class SideEffectBinarySSf<string mnemonic, bits<8> opcode> + : InstSSf<opcode, (outs), (ins bdaddr12only:$BD1, bdladdr12onlylen8:$BDL2), + mnemonic##"\t$BD1, $BDL2", []>; + +class SideEffectBinarySSE<string mnemonic, bits<16> opcode> + : InstSSE<opcode, (outs), (ins bdaddr12only:$BD1, bdaddr12only:$BD2), + mnemonic#"\t$BD1, $BD2", []>; + +class SideEffectBinaryMemMemRR<string mnemonic, bits<8> opcode, + 
RegisterOperand cls1, RegisterOperand cls2> + : InstRR<opcode, (outs cls1:$R1, cls2:$R2), (ins cls1:$R1src, cls2:$R2src), + mnemonic#"\t$R1, $R2", []> { + let Constraints = "$R1 = $R1src, $R2 = $R2src"; + let DisableEncoding = "$R1src, $R2src"; +} + +class SideEffectBinaryMemRRE<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRE<opcode, (outs cls2:$R2), (ins cls1:$R1, cls2:$R2src), + mnemonic#"\t$R1, $R2", []> { + let Constraints = "$R2 = $R2src"; + let DisableEncoding = "$R2src"; +} + +class SideEffectBinaryMemMemRRE<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRE<opcode, (outs cls1:$R1, cls2:$R2), (ins cls1:$R1src, cls2:$R2src), + mnemonic#"\t$R1, $R2", []> { + let Constraints = "$R1 = $R1src, $R2 = $R2src"; + let DisableEncoding = "$R1src, $R2src"; +} + +class SideEffectBinaryMemMemRRFc<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRFc<opcode, (outs cls1:$R1, cls2:$R2), (ins cls1:$R1src, cls2:$R2src), + mnemonic#"\t$R1, $R2", []> { + let Constraints = "$R1 = $R1src, $R2 = $R2src"; + let DisableEncoding = "$R1src, $R2src"; + let M3 = 0; +} + class BinaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2> : InstRR<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2), @@ -2612,6 +3044,15 @@ class BinaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator, let DisableEncoding = "$R1src"; } +class BinaryRRD<string mnemonic, bits<16> opcode, SDPatternOperator operator, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRD<opcode, (outs cls1:$R1), (ins cls2:$R3, cls2:$R2), + mnemonic#"\t$R1, $R3, $R2", + [(set cls1:$R1, (operator cls2:$R3, cls2:$R2))]> { + let OpKey = mnemonic#cls; + let OpType = "reg"; +} + class BinaryRRFa<string mnemonic, bits<16> opcode, SDPatternOperator operator, RegisterOperand cls1, RegisterOperand cls2, RegisterOperand cls3> @@ -2654,6 +3095,25 @@ class BinaryRRFb<string mnemonic, bits<16> opcode, SDPatternOperator operator, let M4 = 0; } +class BinaryMemRRFc<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2, Immediate imm> + : InstRRFc<opcode, (outs cls2:$R2, cls1:$R1), (ins cls1:$R1src, imm:$M3), + mnemonic#"\t$R1, $R2, $M3", []> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; +} + +multiclass BinaryMemRRFcOpt<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> { + def "" : BinaryMemRRFc<mnemonic, opcode, cls1, cls2, imm32zx4>; + def Opt : UnaryMemRRFc<mnemonic, opcode, cls1, cls2>; +} + +class BinaryRRFd<string mnemonic, bits<16> opcode, RegisterOperand cls1, + RegisterOperand cls2> + : InstRRFd<opcode, (outs cls1:$R1), (ins cls2:$R2, imm32zx4:$M4), + mnemonic#"\t$R1, $R2, $M4", []>; + class BinaryRRFe<string mnemonic, bits<16> opcode, RegisterOperand cls1, RegisterOperand cls2> : InstRRFe<opcode, (outs cls1:$R1), (ins imm32zx4:$M3, cls2:$R2), @@ -2804,6 +3264,13 @@ multiclass BinaryRSAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2, } } +class BinaryRSL<string mnemonic, bits<16> opcode, RegisterOperand cls> + : InstRSLb<opcode, (outs cls:$R1), + (ins bdladdr12onlylen8:$BDL2, imm32zx4:$M3), + mnemonic#"\t$R1, $BDL2, $M3", []> { + let mayLoad = 1; +} + class BinaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator, RegisterOperand cls, SDPatternOperator load, bits<5> bytes, AddressingMode mode = bdxaddr12only> @@ -2833,6 +3300,18 @@ class BinaryRXE<string mnemonic, 
bits<16> opcode, SDPatternOperator operator, let M3 = 0; } +class BinaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator, + RegisterOperand cls1, RegisterOperand cls2, + SDPatternOperator load, bits<5> bytes> + : InstRXF<opcode, (outs cls1:$R1), (ins cls2:$R3, bdxaddr12only:$XBD2), + mnemonic#"\t$R1, $R3, $XBD2", + [(set cls1:$R1, (operator cls2:$R3, (load bdxaddr12only:$XBD2)))]> { + let OpKey = mnemonic#"r"#cls; + let OpType = "mem"; + let mayLoad = 1; + let AccessBytes = bytes; +} + class BinaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator, RegisterOperand cls, SDPatternOperator load, bits<5> bytes, AddressingMode mode = bdxaddr20only> @@ -2937,6 +3416,11 @@ class BinaryVRIeFloatGeneric<string mnemonic, bits<16> opcode> (ins VR128:$V2, imm32zx12:$I3, imm32zx4:$M4, imm32zx4:$M5), mnemonic#"\t$V1, $V2, $I3, $M4, $M5", []>; +class BinaryVRIh<string mnemonic, bits<16> opcode> + : InstVRIh<opcode, (outs VR128:$V1), + (ins imm32zx16:$I2, imm32zx4:$I3), + mnemonic#"\t$V1, $I2, $I3", []>; + class BinaryVRRa<string mnemonic, bits<16> opcode, SDPatternOperator operator, TypedReg tr1, TypedReg tr2, bits<4> type = 0, bits<4> m4 = 0> : InstVRRa<opcode, (outs tr1.op:$V1), (ins tr2.op:$V2, imm32zx4:$M5), @@ -3065,6 +3549,10 @@ class BinaryVRRf<string mnemonic, bits<16> opcode, SDPatternOperator operator, mnemonic#"\t$V1, $R2, $R3", [(set tr.op:$V1, (tr.vt (operator GR64:$R2, GR64:$R3)))]>; +class BinaryVRRi<string mnemonic, bits<16> opcode, RegisterOperand cls> + : InstVRRi<opcode, (outs cls:$R1), (ins VR128:$V2, imm32zx4:$M3), + mnemonic#"\t$R1, $V2, $M3", []>; + class BinaryVRSa<string mnemonic, bits<16> opcode, SDPatternOperator operator, TypedReg tr1, TypedReg tr2, bits<4> type> : InstVRSa<opcode, (outs tr1.op:$V1), (ins tr2.op:$V3, shift12only:$BD2), @@ -3102,6 +3590,15 @@ class BinaryVRScGeneric<string mnemonic, bits<16> opcode> (ins VR128:$V3, shift12only:$BD2, imm32zx4: $M4), mnemonic#"\t$R1, $V3, $BD2, $M4", []>; +class BinaryVRSd<string mnemonic, bits<16> opcode, SDPatternOperator operator, + bits<5> bytes> + : InstVRSd<opcode, (outs VR128:$V1), (ins GR32:$R3, bdaddr12only:$BD2), + mnemonic#"\t$V1, $R3, $BD2", + [(set VR128:$V1, (operator GR32:$R3, bdaddr12only:$BD2))]> { + let mayLoad = 1; + let AccessBytes = bytes; +} + class BinaryVRX<string mnemonic, bits<16> opcode, SDPatternOperator operator, TypedReg tr, bits<5> bytes> : InstVRX<opcode, (outs VR128:$V1), (ins bdxaddr12only:$XBD2, imm32zx4:$M3), @@ -3112,6 +3609,50 @@ class BinaryVRX<string mnemonic, bits<16> opcode, SDPatternOperator operator, let AccessBytes = bytes; } +class StoreBinaryRS<string mnemonic, bits<8> opcode, RegisterOperand cls, + bits<5> bytes, AddressingMode mode = bdaddr12only> + : InstRSb<opcode, (outs), (ins cls:$R1, imm32zx4:$M3, mode:$BD2), + mnemonic#"\t$R1, $M3, $BD2", []> { + let mayStore = 1; + let AccessBytes = bytes; +} + +class StoreBinaryRSY<string mnemonic, bits<16> opcode, RegisterOperand cls, + bits<5> bytes, AddressingMode mode = bdaddr20only> + : InstRSYb<opcode, (outs), (ins cls:$R1, imm32zx4:$M3, mode:$BD2), + mnemonic#"\t$R1, $M3, $BD2", []> { + let mayStore = 1; + let AccessBytes = bytes; +} + +multiclass StoreBinaryRSPair<string mnemonic, bits<8> rsOpcode, + bits<16> rsyOpcode, RegisterOperand cls, + bits<5> bytes> { + let DispKey = mnemonic ## #cls in { + let DispSize = "12" in + def "" : StoreBinaryRS<mnemonic, rsOpcode, cls, bytes, bdaddr12pair>; + let DispSize = "20" in + def Y : StoreBinaryRSY<mnemonic#"y", rsyOpcode, cls, bytes, + bdaddr20pair>; + } +} + 
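For reference, the RS/RSY store "pair" multiclass above follows the existing SystemZ displacement-pair convention: the unsuffixed def gets the 12-bit-displacement RS encoding and the Y-suffixed def the 20-bit RSY encoding, with DispKey/DispSize recording that the two are alternative forms of the same operation. A minimal sketch of the expansion, using the STCM definition added later in this patch (the DispKey/DispSize lets are elided here):

// Approximately what
//   defm STCM : StoreBinaryRSPair<"stcm", 0xBE, 0xEB2D, GR32, 0>;
// expands to.
def STCM  : StoreBinaryRS <"stcm",  0xBE,   GR32, 0, bdaddr12pair>;
def STCMY : StoreBinaryRSY<"stcmy", 0xEB2D, GR32, 0, bdaddr20pair>;
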
+class StoreBinaryRSL<string mnemonic, bits<16> opcode, RegisterOperand cls> + : InstRSLb<opcode, (outs), + (ins cls:$R1, bdladdr12onlylen8:$BDL2, imm32zx4:$M3), + mnemonic#"\t$R1, $BDL2, $M3", []> { + let mayStore = 1; +} + +class BinaryVSI<string mnemonic, bits<16> opcode, SDPatternOperator operator, + bits<5> bytes> + : InstVSI<opcode, (outs VR128:$V1), (ins bdaddr12only:$BD2, imm32zx8:$I3), + mnemonic#"\t$V1, $BD2, $I3", + [(set VR128:$V1, (operator imm32zx8:$I3, bdaddr12only:$BD2))]> { + let mayLoad = 1; + let AccessBytes = bytes; +} + class StoreBinaryVRV<string mnemonic, bits<16> opcode, bits<5> bytes, Immediate index> : InstVRV<opcode, (outs), (ins VR128:$V1, bdvaddr12only:$VBD2, index:$M3), @@ -3237,6 +3778,40 @@ multiclass CompareRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode, } } +class CompareRS<string mnemonic, bits<8> opcode, RegisterOperand cls, + bits<5> bytes, AddressingMode mode = bdaddr12only> + : InstRSb<opcode, (outs), (ins cls:$R1, imm32zx4:$M3, mode:$BD2), + mnemonic#"\t$R1, $M3, $BD2", []> { + let mayLoad = 1; + let AccessBytes = bytes; +} + +class CompareRSY<string mnemonic, bits<16> opcode, RegisterOperand cls, + bits<5> bytes, AddressingMode mode = bdaddr20only> + : InstRSYb<opcode, (outs), (ins cls:$R1, imm32zx4:$M3, mode:$BD2), + mnemonic#"\t$R1, $M3, $BD2", []> { + let mayLoad = 1; + let AccessBytes = bytes; +} + +multiclass CompareRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode, + RegisterOperand cls, bits<5> bytes> { + let DispKey = mnemonic ## #cls in { + let DispSize = "12" in + def "" : CompareRS<mnemonic, rsOpcode, cls, bytes, bdaddr12pair>; + let DispSize = "20" in + def Y : CompareRSY<mnemonic#"y", rsyOpcode, cls, bytes, bdaddr20pair>; + } +} + +class CompareSSb<string mnemonic, bits<8> opcode> + : InstSSb<opcode, + (outs), (ins bdladdr12onlylen4:$BDL1, bdladdr12onlylen4:$BDL2), + mnemonic##"\t$BDL1, $BDL2", []> { + let isCompare = 1; + let mayLoad = 1; +} + class CompareSI<string mnemonic, bits<8> opcode, SDPatternOperator operator, SDPatternOperator load, Immediate imm, AddressingMode mode = bdaddr12only> @@ -3305,6 +3880,12 @@ class CompareVRRaFloatGeneric<string mnemonic, bits<16> opcode> let M5 = 0; } +class CompareVRRh<string mnemonic, bits<16> opcode> + : InstVRRh<opcode, (outs), (ins VR128:$V1, VR128:$V2, imm32zx4:$M3), + mnemonic#"\t$V1, $V2, $M3", []> { + let isCompare = 1; +} + class TestRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator, RegisterOperand cls> : InstRXE<opcode, (outs), (ins cls:$R1, bdxaddr12only:$XBD2), @@ -3313,29 +3894,112 @@ class TestRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator, let M3 = 0; } +class TestRSL<string mnemonic, bits<16> opcode> + : InstRSLa<opcode, (outs), (ins bdladdr12onlylen4:$BDL1), + mnemonic#"\t$BDL1", []> { + let mayLoad = 1; +} + +class TestVRRg<string mnemonic, bits<16> opcode> + : InstVRRg<opcode, (outs), (ins VR128:$V1), + mnemonic#"\t$V1", []>; + +class SideEffectTernarySSc<string mnemonic, bits<8> opcode> + : InstSSc<opcode, (outs), (ins bdladdr12onlylen4:$BDL1, + shift12only:$BD2, imm32zx4:$I3), + mnemonic##"\t$BDL1, $BD2, $I3", []>; + +class SideEffectTernaryRRFa<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2, + RegisterOperand cls3> + : InstRRFa<opcode, (outs), (ins cls1:$R1, cls2:$R2, cls3:$R3), + mnemonic#"\t$R1, $R2, $R3", []> { + let M4 = 0; +} + +class SideEffectTernaryRRFb<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2, + RegisterOperand cls3> + : InstRRFb<opcode, 
(outs), (ins cls1:$R1, cls2:$R2, cls3:$R3), + mnemonic#"\t$R1, $R3, $R2", []> { + let M4 = 0; +} + +class SideEffectTernaryMemMemMemRRFb<string mnemonic, bits<16> opcode, + RegisterOperand cls1, + RegisterOperand cls2, + RegisterOperand cls3> + : InstRRFb<opcode, (outs cls1:$R1, cls2:$R2, cls3:$R3), + (ins cls1:$R1src, cls2:$R2src, cls3:$R3src), + mnemonic#"\t$R1, $R3, $R2", []> { + let Constraints = "$R1 = $R1src, $R2 = $R2src, $R3 = $R3src"; + let DisableEncoding = "$R1src, $R2src, $R3src"; + let M4 = 0; +} + class SideEffectTernaryRRFc<string mnemonic, bits<16> opcode, RegisterOperand cls1, RegisterOperand cls2, Immediate imm> : InstRRFc<opcode, (outs), (ins cls1:$R1, cls2:$R2, imm:$M3), mnemonic#"\t$R1, $R2, $M3", []>; +multiclass SideEffectTernaryRRFcOpt<string mnemonic, bits<16> opcode, + RegisterOperand cls1, + RegisterOperand cls2> { + def "" : SideEffectTernaryRRFc<mnemonic, opcode, cls1, cls2, imm32zx4>; + def Opt : SideEffectBinaryRRFc<mnemonic, opcode, cls1, cls2>; +} + +class SideEffectTernaryMemMemRRFc<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2, + Immediate imm> + : InstRRFc<opcode, (outs cls1:$R1, cls2:$R2), + (ins cls1:$R1src, cls2:$R2src, imm:$M3), + mnemonic#"\t$R1, $R2, $M3", []> { + let Constraints = "$R1 = $R1src, $R2 = $R2src"; + let DisableEncoding = "$R1src, $R2src"; +} + +multiclass SideEffectTernaryMemMemRRFcOpt<string mnemonic, bits<16> opcode, + RegisterOperand cls1, + RegisterOperand cls2> { + def "" : SideEffectTernaryMemMemRRFc<mnemonic, opcode, cls1, cls2, imm32zx4>; + def Opt : SideEffectBinaryMemMemRRFc<mnemonic, opcode, cls1, cls2>; +} + class SideEffectTernarySSF<string mnemonic, bits<12> opcode, RegisterOperand cls> : InstSSF<opcode, (outs), (ins bdaddr12only:$BD1, bdaddr12only:$BD2, cls:$R3), mnemonic#"\t$BD1, $BD2, $R3", []>; +class TernaryRRFa<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2, + RegisterOperand cls3> + : InstRRFa<opcode, (outs cls1:$R1), (ins cls2:$R2, cls3:$R3, imm32zx4:$M4), + mnemonic#"\t$R1, $R2, $R3, $M4", []>; + +class TernaryRRFb<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2, + RegisterOperand cls3> + : InstRRFb<opcode, (outs cls1:$R1, cls3:$R3), + (ins cls1:$R1src, cls2:$R2, imm32zx4:$M4), + mnemonic#"\t$R1, $R3, $R2, $M4", []> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; +} + class TernaryRRFe<string mnemonic, bits<16> opcode, RegisterOperand cls1, RegisterOperand cls2> : InstRRFe<opcode, (outs cls1:$R1), (ins imm32zx4:$M3, cls2:$R2, imm32zx4:$M4), mnemonic#"\t$R1, $M3, $R2, $M4", []>; -class TernaryRRD<string mnemonic, bits<16> opcode, - SDPatternOperator operator, RegisterOperand cls> - : InstRRD<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, cls:$R2), +class TernaryRRD<string mnemonic, bits<16> opcode, SDPatternOperator operator, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRD<opcode, (outs cls1:$R1), (ins cls2:$R1src, cls2:$R3, cls2:$R2), mnemonic#"\t$R1, $R3, $R2", - [(set cls:$R1, (operator cls:$R1src, cls:$R3, cls:$R2))]> { + [(set cls1:$R1, (operator cls2:$R1src, cls2:$R3, cls2:$R2))]> { let OpKey = mnemonic#cls; let OpType = "reg"; let Constraints = "$R1 = $R1src"; @@ -3376,13 +4040,44 @@ multiclass TernaryRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode, } } +class SideEffectTernaryRS<string mnemonic, bits<8> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRSa<opcode, (outs), + (ins cls1:$R1, cls2:$R3, bdaddr12only:$BD2), + mnemonic#"\t$R1, $R3, $BD2", 
[]>; + +class SideEffectTernaryRSY<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRSYa<opcode, (outs), + (ins cls1:$R1, cls2:$R3, bdaddr20only:$BD2), + mnemonic#"\t$R1, $R3, $BD2", []>; + +class SideEffectTernaryMemMemRS<string mnemonic, bits<8> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRSa<opcode, (outs cls1:$R1, cls2:$R3), + (ins cls1:$R1src, cls2:$R3src, shift12only:$BD2), + mnemonic#"\t$R1, $R3, $BD2", []> { + let Constraints = "$R1 = $R1src, $R3 = $R3src"; + let DisableEncoding = "$R1src, $R3src"; +} + +class SideEffectTernaryMemMemRSY<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRSYa<opcode, (outs cls1:$R1, cls2:$R3), + (ins cls1:$R1src, cls2:$R3src, shift20only:$BD2), + mnemonic#"\t$R1, $R3, $BD2", []> { + let Constraints = "$R1 = $R1src, $R3 = $R3src"; + let DisableEncoding = "$R1src, $R3src"; +} + class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator, - RegisterOperand cls, SDPatternOperator load, bits<5> bytes> - : InstRXF<opcode, (outs cls:$R1), - (ins cls:$R1src, cls:$R3, bdxaddr12only:$XBD2), + RegisterOperand cls1, RegisterOperand cls2, + SDPatternOperator load, bits<5> bytes> + : InstRXF<opcode, (outs cls1:$R1), + (ins cls2:$R1src, cls2:$R3, bdxaddr12only:$XBD2), mnemonic#"\t$R1, $R3, $XBD2", - [(set cls:$R1, (operator cls:$R1src, cls:$R3, - (load bdxaddr12only:$XBD2)))]> { + [(set cls1:$R1, (operator cls2:$R1src, cls2:$R3, + (load bdxaddr12only:$XBD2)))]> { let OpKey = mnemonic#"r"#cls; let OpType = "mem"; let Constraints = "$R1 = $R1src"; @@ -3412,6 +4107,11 @@ class TernaryVRId<string mnemonic, bits<16> opcode, SDPatternOperator operator, let M5 = type; } +class TernaryVRIi<string mnemonic, bits<16> opcode, RegisterOperand cls> + : InstVRIi<opcode, (outs VR128:$V1), + (ins cls:$R2, imm32zx8:$I3, imm32zx4:$M4), + mnemonic#"\t$V1, $R2, $I3, $M4", []>; + class TernaryVRRa<string mnemonic, bits<16> opcode, SDPatternOperator operator, TypedReg tr1, TypedReg tr2, bits<4> type, bits<4> m4or> : InstVRRa<opcode, (outs tr1.op:$V1), @@ -3484,6 +4184,25 @@ class TernaryVRRc<string mnemonic, bits<16> opcode, SDPatternOperator operator, let M6 = 0; } +class TernaryVRRcFloat<string mnemonic, bits<16> opcode, + SDPatternOperator operator, TypedReg tr1, TypedReg tr2, + bits<4> type = 0, bits<4> m5 = 0> + : InstVRRc<opcode, (outs tr1.op:$V1), + (ins tr2.op:$V2, tr2.op:$V3, imm32zx4:$M6), + mnemonic#"\t$V1, $V2, $V3, $M6", + [(set tr1.op:$V1, (tr1.vt (operator (tr2.vt tr2.op:$V2), + (tr2.vt tr2.op:$V3), + imm32zx4:$M6)))]> { + let M4 = type; + let M5 = m5; +} + +class TernaryVRRcFloatGeneric<string mnemonic, bits<16> opcode> + : InstVRRc<opcode, (outs VR128:$V1), + (ins VR128:$V2, VR128:$V3, imm32zx4:$M4, imm32zx4:$M5, + imm32zx4:$M6), + mnemonic#"\t$V1, $V2, $V3, $M4, $M5, $M6", []>; + class TernaryVRRd<string mnemonic, bits<16> opcode, SDPatternOperator operator, TypedReg tr1, TypedReg tr2, bits<4> type = 0> : InstVRRd<opcode, (outs tr1.op:$V1), @@ -3589,20 +4308,38 @@ class QuaternaryVRIdGeneric<string mnemonic, bits<16> opcode> let DisableEncoding = "$V1src"; } +class QuaternaryVRIf<string mnemonic, bits<16> opcode> + : InstVRIf<opcode, (outs VR128:$V1), + (ins VR128:$V2, VR128:$V3, + imm32zx8:$I4, imm32zx4:$M5), + mnemonic#"\t$V1, $V2, $V3, $I4, $M5", []>; + +class QuaternaryVRIg<string mnemonic, bits<16> opcode> + : InstVRIg<opcode, (outs VR128:$V1), + (ins VR128:$V2, imm32zx8:$I3, + imm32zx8:$I4, imm32zx4:$M5), + mnemonic#"\t$V1, $V2, $I3, $I4, $M5", []>; + 
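The register-pair "MemMem" classes added above (SideEffectBinaryMemMemRR earlier in this hunk sequence, SideEffectTernaryMemMemRS/RSY here) model instructions such as MVCL, MVCLE and MVCLU, defined further down in this patch, which both read and update their address/length register pairs: the pairs appear as outs and as tied ins so the updates are visible to the register allocator, while DisableEncoding keeps the tied sources out of the assembly string and the encoding. A minimal sketch of what such a def resolves to after class expansion, using a hypothetical def name and omitting the mayLoad/mayStore/Defs = [CC] lets applied at the real definition site:

// Approximately what
//   def MVCLU : SideEffectTernaryMemMemRSY<"mvclu", 0xEB8E, GR128, GR128>;
// resolves to.
def MVCLUSketch : InstRSYa<0xEB8E, (outs GR128:$R1, GR128:$R3),
                           (ins GR128:$R1src, GR128:$R3src, shift20only:$BD2),
                           "mvclu\t$R1, $R3, $BD2", []> {
  let Constraints = "$R1 = $R1src, $R3 = $R3src";
  let DisableEncoding = "$R1src, $R3src";
}
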
class QuaternaryVRRd<string mnemonic, bits<16> opcode, SDPatternOperator operator, TypedReg tr1, TypedReg tr2, - bits<4> type, SDPatternOperator m6mask, bits<4> m6or> + TypedReg tr3, TypedReg tr4, bits<4> type, + SDPatternOperator m6mask = imm32zx4, bits<4> m6or = 0> : InstVRRd<opcode, (outs tr1.op:$V1), - (ins tr2.op:$V2, tr2.op:$V3, tr2.op:$V4, m6mask:$M6), + (ins tr2.op:$V2, tr3.op:$V3, tr4.op:$V4, m6mask:$M6), mnemonic#"\t$V1, $V2, $V3, $V4, $M6", [(set tr1.op:$V1, (tr1.vt (operator (tr2.vt tr2.op:$V2), - (tr2.vt tr2.op:$V3), - (tr2.vt tr2.op:$V4), + (tr3.vt tr3.op:$V3), + (tr4.vt tr4.op:$V4), m6mask:$M6)))], m6or> { let M5 = type; } +class QuaternaryVRRdGeneric<string mnemonic, bits<16> opcode> + : InstVRRd<opcode, (outs VR128:$V1), + (ins VR128:$V2, VR128:$V3, VR128:$V4, imm32zx4:$M5, imm32zx4:$M6), + mnemonic#"\t$V1, $V2, $V3, $V4, $M5, $M6", []>; + // Declare a pair of instructions, one which sets CC and one which doesn't. // The CC-setting form ends with "S" and sets the low bit of M6. // Also create aliases to make use of M6 operand optional in assembler. @@ -3611,13 +4348,15 @@ multiclass QuaternaryOptVRRdSPair<string mnemonic, bits<16> opcode, SDPatternOperator operator_cc, TypedReg tr1, TypedReg tr2, bits<4> type, bits<4> modifier = 0> { - def "" : QuaternaryVRRd<mnemonic, opcode, operator, tr1, tr2, type, + def "" : QuaternaryVRRd<mnemonic, opcode, operator, + tr1, tr2, tr2, tr2, type, imm32zx4even, !and (modifier, 14)>; def : InstAlias<mnemonic#"\t$V1, $V2, $V3, $V4", (!cast<Instruction>(NAME) tr1.op:$V1, tr2.op:$V2, tr2.op:$V3, tr2.op:$V4, 0)>; let Defs = [CC] in - def S : QuaternaryVRRd<mnemonic##"s", opcode, operator_cc, tr1, tr2, type, + def S : QuaternaryVRRd<mnemonic##"s", opcode, operator_cc, + tr1, tr2, tr2, tr2, type, imm32zx4even, !add (!and (modifier, 14), 1)>; def : InstAlias<mnemonic#"s\t$V1, $V2, $V3, $V4", (!cast<Instruction>(NAME#"S") tr1.op:$V1, tr2.op:$V2, @@ -3625,15 +4364,41 @@ multiclass QuaternaryOptVRRdSPair<string mnemonic, bits<16> opcode, } multiclass QuaternaryOptVRRdSPairGeneric<string mnemonic, bits<16> opcode> { - def "" : InstVRRd<opcode, (outs VR128:$V1), - (ins VR128:$V2, VR128:$V3, VR128:$V4, - imm32zx4:$M5, imm32zx4:$M6), - mnemonic#"\t$V1, $V2, $V3, $V4, $M5, $M6", []>; + def "" : QuaternaryVRRdGeneric<mnemonic, opcode>; def : InstAlias<mnemonic#"\t$V1, $V2, $V3, $V4, $M5", (!cast<Instruction>(NAME) VR128:$V1, VR128:$V2, VR128:$V3, VR128:$V4, imm32zx4:$M5, 0)>; } +class SideEffectQuaternaryRRFa<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2, + RegisterOperand cls3> + : InstRRFa<opcode, (outs), (ins cls1:$R1, cls2:$R2, cls3:$R3, imm32zx4:$M4), + mnemonic#"\t$R1, $R2, $R3, $M4", []>; + +multiclass SideEffectQuaternaryRRFaOptOpt<string mnemonic, bits<16> opcode, + RegisterOperand cls1, + RegisterOperand cls2, + RegisterOperand cls3> { + def "" : SideEffectQuaternaryRRFa<mnemonic, opcode, cls1, cls2, cls3>; + def Opt : SideEffectTernaryRRFa<mnemonic, opcode, cls1, cls2, cls3>; + def OptOpt : SideEffectBinaryRRFa<mnemonic, opcode, cls1, cls2>; +} + +class SideEffectQuaternaryRRFb<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2, + RegisterOperand cls3> + : InstRRFb<opcode, (outs), (ins cls1:$R1, cls2:$R2, cls3:$R3, imm32zx4:$M4), + mnemonic#"\t$R1, $R3, $R2, $M4", []>; + +multiclass SideEffectQuaternaryRRFbOpt<string mnemonic, bits<16> opcode, + RegisterOperand cls1, + RegisterOperand cls2, + RegisterOperand cls3> { + def "" : SideEffectQuaternaryRRFb<mnemonic, opcode, cls1, cls2, 
cls3>; + def Opt : SideEffectTernaryRRFb<mnemonic, opcode, cls1, cls2, cls3>; +} + class SideEffectQuaternarySSe<string mnemonic, bits<8> opcode, RegisterOperand cls> : InstSSe<opcode, (outs), @@ -3649,6 +4414,16 @@ class LoadAndOpRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator, let mayStore = 1; } +class CmpSwapRRE<string mnemonic, bits<16> opcode, + RegisterOperand cls1, RegisterOperand cls2> + : InstRRE<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2), + mnemonic#"\t$R1, $R2", []> { + let Constraints = "$R1 = $R1src"; + let DisableEncoding = "$R1src"; + let mayLoad = 1; + let mayStore = 1; +} + class CmpSwapRS<string mnemonic, bits<8> opcode, SDPatternOperator operator, RegisterOperand cls, AddressingMode mode = bdaddr12only> : InstRSa<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, mode:$BD2), @@ -3897,10 +4672,10 @@ class RotateSelectRIEfPseudo<RegisterOperand cls1, RegisterOperand cls2> // Implements "$dst = $cc & (8 >> CC) ? $src1 : $src2", where CC is // the value of the PSW's 2-bit condition code field. -class SelectWrapper<RegisterOperand cls> +class SelectWrapper<ValueType vt, RegisterOperand cls> : Pseudo<(outs cls:$dst), (ins cls:$src1, cls:$src2, imm32zx4:$valid, imm32zx4:$cc), - [(set cls:$dst, (z_select_ccmask cls:$src1, cls:$src2, + [(set (vt cls:$dst), (z_select_ccmask cls:$src1, cls:$src2, imm32zx4:$valid, imm32zx4:$cc))]> { let usesCustomInserter = 1; // Although the instructions used by these nodes do not in themselves @@ -3981,9 +4756,7 @@ class AtomicLoadWBinaryImm<SDPatternOperator operator, Immediate imm> // another instruction to handle the excess. multiclass MemorySS<string mnemonic, bits<8> opcode, SDPatternOperator sequence, SDPatternOperator loop> { - def "" : InstSSa<opcode, (outs), (ins bdladdr12onlylen8:$BDL1, - bdaddr12only:$BD2), - mnemonic##"\t$BDL1, $BD2", []>; + def "" : SideEffectBinarySSa<mnemonic, opcode>; let usesCustomInserter = 1, hasNoSchedulingInfo = 1 in { def Sequence : Pseudo<(outs), (ins bdaddr12only:$dest, bdaddr12only:$src, imm64:$length), @@ -4003,13 +4776,8 @@ multiclass MemorySS<string mnemonic, bits<8> opcode, // the full loop (the main instruction plus the branch on CC==3). multiclass StringRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator> { - def "" : InstRRE<opcode, (outs GR64:$R1, GR64:$R2), - (ins GR64:$R1src, GR64:$R2src), - mnemonic#"\t$R1, $R2", []> { - let Uses = [R0L]; - let Constraints = "$R1 = $R1src, $R2 = $R2src"; - let DisableEncoding = "$R1src, $R2src"; - } + let Uses = [R0L] in + def "" : SideEffectBinaryMemMemRRE<mnemonic, opcode, GR64, GR64>; let usesCustomInserter = 1, hasNoSchedulingInfo = 1 in def Loop : Pseudo<(outs GR64:$end), (ins GR64:$start1, GR64:$start2, GR32:$char), diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrHFP.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrHFP.td new file mode 100644 index 0000000..6d5b4b9 --- /dev/null +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrHFP.td @@ -0,0 +1,240 @@ +//==- SystemZInstrHFP.td - Floating-point SystemZ instructions -*- tblgen-*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The instructions in this file implement SystemZ hexadecimal floating-point +// arithmetic. 
Since this format is not mapped to any source-language data +// type, these instructions are not used for code generation, but are provided +// for use with the assembler and disassembler only. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Move instructions +//===----------------------------------------------------------------------===// + +// Load and test. +let Defs = [CC] in { + def LTER : UnaryRR <"lter", 0x32, null_frag, FP32, FP32>; + def LTDR : UnaryRR <"ltdr", 0x22, null_frag, FP64, FP64>; + def LTXR : UnaryRRE<"ltxr", 0xB362, null_frag, FP128, FP128>; +} + +//===----------------------------------------------------------------------===// +// Conversion instructions +//===----------------------------------------------------------------------===// + +// Convert floating-point values to narrower representations. +def LEDR : UnaryRR <"ledr", 0x35, null_frag, FP32, FP64>; +def LEXR : UnaryRRE<"lexr", 0xB366, null_frag, FP32, FP128>; +def LDXR : UnaryRR <"ldxr", 0x25, null_frag, FP64, FP128>; +let isAsmParserOnly = 1 in { + def LRER : UnaryRR <"lrer", 0x35, null_frag, FP32, FP64>; + def LRDR : UnaryRR <"lrdr", 0x25, null_frag, FP64, FP128>; +} + +// Extend floating-point values to wider representations. +def LDER : UnaryRRE<"lder", 0xB324, null_frag, FP64, FP32>; +def LXER : UnaryRRE<"lxer", 0xB326, null_frag, FP128, FP32>; +def LXDR : UnaryRRE<"lxdr", 0xB325, null_frag, FP128, FP64>; + +def LDE : UnaryRXE<"lde", 0xED24, null_frag, FP64, 4>; +def LXE : UnaryRXE<"lxe", 0xED26, null_frag, FP128, 4>; +def LXD : UnaryRXE<"lxd", 0xED25, null_frag, FP128, 8>; + +// Convert a signed integer register value to a floating-point one. +def CEFR : UnaryRRE<"cefr", 0xB3B4, null_frag, FP32, GR32>; +def CDFR : UnaryRRE<"cdfr", 0xB3B5, null_frag, FP64, GR32>; +def CXFR : UnaryRRE<"cxfr", 0xB3B6, null_frag, FP128, GR32>; + +def CEGR : UnaryRRE<"cegr", 0xB3C4, null_frag, FP32, GR64>; +def CDGR : UnaryRRE<"cdgr", 0xB3C5, null_frag, FP64, GR64>; +def CXGR : UnaryRRE<"cxgr", 0xB3C6, null_frag, FP128, GR64>; + +// Convert a floating-point register value to a signed integer value, +// with the second operand (modifier M3) specifying the rounding mode. +let Defs = [CC] in { + def CFER : BinaryRRFe<"cfer", 0xB3B8, GR32, FP32>; + def CFDR : BinaryRRFe<"cfdr", 0xB3B9, GR32, FP64>; + def CFXR : BinaryRRFe<"cfxr", 0xB3BA, GR32, FP128>; + + def CGER : BinaryRRFe<"cger", 0xB3C8, GR64, FP32>; + def CGDR : BinaryRRFe<"cgdr", 0xB3C9, GR64, FP64>; + def CGXR : BinaryRRFe<"cgxr", 0xB3CA, GR64, FP128>; +} + +// Convert BFP to HFP. +let Defs = [CC] in { + def THDER : UnaryRRE<"thder", 0xB358, null_frag, FP64, FP32>; + def THDR : UnaryRRE<"thdr", 0xB359, null_frag, FP64, FP64>; +} + +// Convert HFP to BFP. +let Defs = [CC] in { + def TBEDR : BinaryRRFe<"tbedr", 0xB350, FP32, FP64>; + def TBDR : BinaryRRFe<"tbdr", 0xB351, FP64, FP64>; +} + + +//===----------------------------------------------------------------------===// +// Unary arithmetic +//===----------------------------------------------------------------------===// + +// Negation (Load Complement). +let Defs = [CC] in { + def LCER : UnaryRR <"lcer", 0x33, null_frag, FP32, FP32>; + def LCDR : UnaryRR <"lcdr", 0x23, null_frag, FP64, FP64>; + def LCXR : UnaryRRE<"lcxr", 0xB363, null_frag, FP128, FP128>; +} + +// Absolute value (Load Positive). 
+let Defs = [CC] in { + def LPER : UnaryRR <"lper", 0x30, null_frag, FP32, FP32>; + def LPDR : UnaryRR <"lpdr", 0x20, null_frag, FP64, FP64>; + def LPXR : UnaryRRE<"lpxr", 0xB360, null_frag, FP128, FP128>; +} + +// Negative absolute value (Load Negative). +let Defs = [CC] in { + def LNER : UnaryRR <"lner", 0x31, null_frag, FP32, FP32>; + def LNDR : UnaryRR <"lndr", 0x21, null_frag, FP64, FP64>; + def LNXR : UnaryRRE<"lnxr", 0xB361, null_frag, FP128, FP128>; +} + +// Halve. +def HER : UnaryRR <"her", 0x34, null_frag, FP32, FP32>; +def HDR : UnaryRR <"hdr", 0x24, null_frag, FP64, FP64>; + +// Square root. +def SQER : UnaryRRE<"sqer", 0xB245, null_frag, FP32, FP32>; +def SQDR : UnaryRRE<"sqdr", 0xB244, null_frag, FP64, FP64>; +def SQXR : UnaryRRE<"sqxr", 0xB336, null_frag, FP128, FP128>; + +def SQE : UnaryRXE<"sqe", 0xED34, null_frag, FP32, 4>; +def SQD : UnaryRXE<"sqd", 0xED35, null_frag, FP64, 8>; + +// Round to an integer (rounding towards zero). +def FIER : UnaryRRE<"fier", 0xB377, null_frag, FP32, FP32>; +def FIDR : UnaryRRE<"fidr", 0xB37F, null_frag, FP64, FP64>; +def FIXR : UnaryRRE<"fixr", 0xB367, null_frag, FP128, FP128>; + + +//===----------------------------------------------------------------------===// +// Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition. +let Defs = [CC] in { + let isCommutable = 1 in { + def AER : BinaryRR<"aer", 0x3A, null_frag, FP32, FP32>; + def ADR : BinaryRR<"adr", 0x2A, null_frag, FP64, FP64>; + def AXR : BinaryRR<"axr", 0x36, null_frag, FP128, FP128>; + } + def AE : BinaryRX<"ae", 0x7A, null_frag, FP32, load, 4>; + def AD : BinaryRX<"ad", 0x6A, null_frag, FP64, load, 8>; +} + +// Addition (unnormalized). +let Defs = [CC] in { + let isCommutable = 1 in { + def AUR : BinaryRR<"aur", 0x3E, null_frag, FP32, FP32>; + def AWR : BinaryRR<"awr", 0x2E, null_frag, FP64, FP64>; + } + def AU : BinaryRX<"au", 0x7E, null_frag, FP32, load, 4>; + def AW : BinaryRX<"aw", 0x6E, null_frag, FP64, load, 8>; +} + +// Subtraction. +let Defs = [CC] in { + def SER : BinaryRR<"ser", 0x3B, null_frag, FP32, FP32>; + def SDR : BinaryRR<"sdr", 0x2B, null_frag, FP64, FP64>; + def SXR : BinaryRR<"sxr", 0x37, null_frag, FP128, FP128>; + + def SE : BinaryRX<"se", 0x7B, null_frag, FP32, load, 4>; + def SD : BinaryRX<"sd", 0x6B, null_frag, FP64, load, 8>; +} + +// Subtraction (unnormalized). +let Defs = [CC] in { + def SUR : BinaryRR<"sur", 0x3F, null_frag, FP32, FP32>; + def SWR : BinaryRR<"swr", 0x2F, null_frag, FP64, FP64>; + + def SU : BinaryRX<"su", 0x7F, null_frag, FP32, load, 4>; + def SW : BinaryRX<"sw", 0x6F, null_frag, FP64, load, 8>; +} + +// Multiplication. +let isCommutable = 1 in { + def MEER : BinaryRRE<"meer", 0xB337, null_frag, FP32, FP32>; + def MDR : BinaryRR <"mdr", 0x2C, null_frag, FP64, FP64>; + def MXR : BinaryRR <"mxr", 0x26, null_frag, FP128, FP128>; +} +def MEE : BinaryRXE<"mee", 0xED37, null_frag, FP32, load, 4>; +def MD : BinaryRX <"md", 0x6C, null_frag, FP64, load, 8>; + +// Extending multiplication (f32 x f32 -> f64). +def MDER : BinaryRR<"mder", 0x3C, null_frag, FP64, FP32>; +def MDE : BinaryRX<"mde", 0x7C, null_frag, FP64, load, 4>; +let isAsmParserOnly = 1 in { + def MER : BinaryRR<"mer", 0x3C, null_frag, FP64, FP32>; + def ME : BinaryRX<"me", 0x7C, null_frag, FP64, load, 4>; +} + +// Extending multiplication (f64 x f64 -> f128). +def MXDR : BinaryRR<"mxdr", 0x27, null_frag, FP128, FP64>; +def MXD : BinaryRX<"mxd", 0x67, null_frag, FP128, load, 8>; + +// Fused multiply-add. 
+def MAER : TernaryRRD<"maer", 0xB32E, null_frag, FP32, FP32>; +def MADR : TernaryRRD<"madr", 0xB33E, null_frag, FP64, FP64>; +def MAE : TernaryRXF<"mae", 0xED2E, null_frag, FP32, FP32, load, 4>; +def MAD : TernaryRXF<"mad", 0xED3E, null_frag, FP64, FP64, load, 8>; + +// Fused multiply-subtract. +def MSER : TernaryRRD<"mser", 0xB32F, null_frag, FP32, FP32>; +def MSDR : TernaryRRD<"msdr", 0xB33F, null_frag, FP64, FP64>; +def MSE : TernaryRXF<"mse", 0xED2F, null_frag, FP32, FP32, load, 4>; +def MSD : TernaryRXF<"msd", 0xED3F, null_frag, FP64, FP64, load, 8>; + +// Multiplication (unnormalized). +def MYR : BinaryRRD<"myr", 0xB33B, null_frag, FP128, FP64>; +def MYHR : BinaryRRD<"myhr", 0xB33D, null_frag, FP64, FP64>; +def MYLR : BinaryRRD<"mylr", 0xB339, null_frag, FP64, FP64>; +def MY : BinaryRXF<"my", 0xED3B, null_frag, FP128, FP64, load, 8>; +def MYH : BinaryRXF<"myh", 0xED3D, null_frag, FP64, FP64, load, 8>; +def MYL : BinaryRXF<"myl", 0xED39, null_frag, FP64, FP64, load, 8>; + +// Fused multiply-add (unnormalized). +def MAYR : TernaryRRD<"mayr", 0xB33A, null_frag, FP128, FP64>; +def MAYHR : TernaryRRD<"mayhr", 0xB33C, null_frag, FP64, FP64>; +def MAYLR : TernaryRRD<"maylr", 0xB338, null_frag, FP64, FP64>; +def MAY : TernaryRXF<"may", 0xED3A, null_frag, FP128, FP64, load, 8>; +def MAYH : TernaryRXF<"mayh", 0xED3C, null_frag, FP64, FP64, load, 8>; +def MAYL : TernaryRXF<"mayl", 0xED38, null_frag, FP64, FP64, load, 8>; + +// Division. +def DER : BinaryRR <"der", 0x3D, null_frag, FP32, FP32>; +def DDR : BinaryRR <"ddr", 0x2D, null_frag, FP64, FP64>; +def DXR : BinaryRRE<"dxr", 0xB22D, null_frag, FP128, FP128>; +def DE : BinaryRX <"de", 0x7D, null_frag, FP32, load, 4>; +def DD : BinaryRX <"dd", 0x6D, null_frag, FP64, load, 8>; + + +//===----------------------------------------------------------------------===// +// Comparisons +//===----------------------------------------------------------------------===// + +let Defs = [CC] in { + def CER : CompareRR <"cer", 0x39, null_frag, FP32, FP32>; + def CDR : CompareRR <"cdr", 0x29, null_frag, FP64, FP64>; + def CXR : CompareRRE<"cxr", 0xB369, null_frag, FP128, FP128>; + + def CE : CompareRX<"ce", 0x79, null_frag, FP32, load, 4>; + def CD : CompareRX<"cd", 0x69, null_frag, FP64, load, 8>; +} + diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp index 3565d5f..4533f4f 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -12,11 +12,32 @@ //===----------------------------------------------------------------------===// #include "SystemZInstrInfo.h" +#include "MCTargetDesc/SystemZMCTargetDesc.h" +#include "SystemZ.h" #include "SystemZInstrBuilder.h" -#include "SystemZTargetMachine.h" -#include "llvm/CodeGen/LiveVariables.h" +#include "SystemZSubtarget.h" +#include "llvm/CodeGen/LiveInterval.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" +#include "llvm/CodeGen/LiveVariables.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SlotIndexes.h" +#include "llvm/MC/MCInstrDesc.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/Support/BranchProbability.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" 
+#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetSubtargetInfo.h" +#include <cassert> +#include <cstdint> +#include <iterator> using namespace llvm; @@ -58,20 +79,34 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI, MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI); MBB->insert(MI, EarlierMI); - // Set up the two 64-bit registers. + // Set up the two 64-bit registers and remember super reg and its flags. MachineOperand &HighRegOp = EarlierMI->getOperand(0); MachineOperand &LowRegOp = MI->getOperand(0); + unsigned Reg128 = LowRegOp.getReg(); + unsigned Reg128Killed = getKillRegState(LowRegOp.isKill()); + unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef()); HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64)); LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64)); + if (MI->mayStore()) { + // Add implicit uses of the super register in case one of the subregs is + // undefined. We could track liveness and skip storing an undefined + // subreg, but this is hopefully rare (discovered with llvm-stress). + // If Reg128 was killed, set kill flag on MI. + unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit); + MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl); + MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed)); + } + // The address in the first (high) instruction is already correct. // Adjust the offset in the second (low) instruction. MachineOperand &HighOffsetOp = EarlierMI->getOperand(2); MachineOperand &LowOffsetOp = MI->getOperand(2); LowOffsetOp.setImm(LowOffsetOp.getImm() + 8); - // Clear the kill flags for the base and index registers in the first - // instruction. + // Clear the kill flags on the registers in the first instruction. + if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse()) + EarlierMI->getOperand(0).setIsKill(false); EarlierMI->getOperand(1).setIsKill(false); EarlierMI->getOperand(3).setIsKill(false); @@ -131,7 +166,8 @@ void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode, MI.setDesc(get(LowOpcodeK)); else { emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg, - SystemZ::LR, 32, MI.getOperand(1).isKill()); + SystemZ::LR, 32, MI.getOperand(1).isKill(), + MI.getOperand(1).isUndef()); MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode)); MI.getOperand(1).setReg(DestReg); MI.tieOperands(0, 1); @@ -185,41 +221,45 @@ void SystemZInstrInfo::expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode, // are low registers, otherwise use RISB[LH]G. void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned Size) const { - emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), - MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode, - Size, MI.getOperand(1).isKill()); + MachineInstrBuilder MIB = + emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), + MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode, + Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef()); + + // Keep the remaining operands as-is. 
+ for (unsigned I = 2; I < MI.getNumOperands(); ++I) + MIB.add(MI.getOperand(I)); + MI.eraseFromParent(); } void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const { MachineBasicBlock *MBB = MI->getParent(); MachineFunction &MF = *MBB->getParent(); - const unsigned Reg = MI->getOperand(0).getReg(); + const unsigned Reg64 = MI->getOperand(0).getReg(); + const unsigned Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32); - // Conveniently, all 4 instructions are cloned from LOAD_STACK_GUARD, - // so they already have operand 0 set to reg. + // EAR can only load the low subregister so us a shift for %a0 to produce + // the GR containing %a0 and %a1. // ear <reg>, %a0 - MachineInstr *Ear1MI = MF.CloneMachineInstr(MI); - MBB->insert(MI, Ear1MI); - Ear1MI->setDesc(get(SystemZ::EAR)); - MachineInstrBuilder(MF, Ear1MI).addReg(SystemZ::A0); + BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32) + .addReg(SystemZ::A0) + .addReg(Reg64, RegState::ImplicitDefine); // sllg <reg>, <reg>, 32 - MachineInstr *SllgMI = MF.CloneMachineInstr(MI); - MBB->insert(MI, SllgMI); - SllgMI->setDesc(get(SystemZ::SLLG)); - MachineInstrBuilder(MF, SllgMI).addReg(Reg).addReg(0).addImm(32); + BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64) + .addReg(Reg64) + .addReg(0) + .addImm(32); // ear <reg>, %a1 - MachineInstr *Ear2MI = MF.CloneMachineInstr(MI); - MBB->insert(MI, Ear2MI); - Ear2MI->setDesc(get(SystemZ::EAR)); - MachineInstrBuilder(MF, Ear2MI).addReg(SystemZ::A1); + BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32) + .addReg(SystemZ::A1); // lg <reg>, 40(<reg>) MI->setDesc(get(SystemZ::LG)); - MachineInstrBuilder(MF, MI).addReg(Reg).addImm(40).addReg(0); + MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0); } // Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR @@ -227,11 +267,13 @@ void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const { // are low registers, otherwise use RISB[LH]G. Size is the number of bits // taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR). // KillSrc is true if this move is the last use of SrcReg. -void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MBBI, - const DebugLoc &DL, unsigned DestReg, - unsigned SrcReg, unsigned LowLowOpcode, - unsigned Size, bool KillSrc) const { +MachineInstrBuilder +SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + const DebugLoc &DL, unsigned DestReg, + unsigned SrcReg, unsigned LowLowOpcode, + unsigned Size, bool KillSrc, + bool UndefSrc) const { unsigned Opcode; bool DestIsHigh = isHighReg(DestReg); bool SrcIsHigh = isHighReg(SrcReg); @@ -242,18 +284,16 @@ void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB, else if (!DestIsHigh && SrcIsHigh) Opcode = SystemZ::RISBLH; else { - BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg) - .addReg(SrcReg, getKillRegState(KillSrc)); - return; + return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc)); } unsigned Rotate = (DestIsHigh != SrcIsHigh ? 
32 : 0); - BuildMI(MBB, MBBI, DL, get(Opcode), DestReg) + return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg) .addReg(DestReg, RegState::Undef) - .addReg(SrcReg, getKillRegState(KillSrc)) + .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc)) .addImm(32 - Size).addImm(128 + 31).addImm(Rotate); } - MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, @@ -282,7 +322,6 @@ MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI, } } - // If MI is a simple load or store for a frame object, return the register // it loads or stores and set FrameIndex to the index of the frame object. // Return 0 otherwise. @@ -586,7 +625,6 @@ bool SystemZInstrInfo::optimizeCompareInstr( removeIPMBasedCompare(Compare, SrcReg, MRI, &RI); } - bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Pred, unsigned TrueReg, unsigned FalseReg, @@ -640,6 +678,12 @@ void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB, else { Opc = SystemZ::LOCR; MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass); + unsigned TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); + unsigned FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass); + BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg); + BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg); + TrueReg = TReg; + FalseReg = FReg; } } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) Opc = SystemZ::LOCGR; @@ -706,7 +750,7 @@ bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, return true; } -bool SystemZInstrInfo::isPredicable(MachineInstr &MI) const { +bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const { unsigned Opcode = MI.getOpcode(); if (Opcode == SystemZ::Return || Opcode == SystemZ::Trap || @@ -780,10 +824,11 @@ bool SystemZInstrInfo::PredicateInstruction( MI.RemoveOperand(0); MI.setDesc(get(SystemZ::CallBRCL)); MachineInstrBuilder(*MI.getParent()->getParent(), MI) - .addImm(CCValid).addImm(CCMask) - .addOperand(FirstOp) - .addRegMask(RegMask) - .addReg(SystemZ::CC, RegState::Implicit); + .addImm(CCValid) + .addImm(CCMask) + .add(FirstOp) + .addRegMask(RegMask) + .addReg(SystemZ::CC, RegState::Implicit); return true; } if (Opcode == SystemZ::CallBR) { @@ -803,17 +848,55 @@ void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const { - // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too. + // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the + // super register in case one of the subregs is undefined. + // This handles ADDR128 too. 
if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) { copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64), RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc); + MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI)) + .addReg(SrcReg, RegState::Implicit); copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64), RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc); + MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI)) + .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit)); return; } if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) { - emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc); + emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc, + false); + return; + } + + // Move 128-bit floating-point values between VR128 and FP128. + if (SystemZ::VR128BitRegClass.contains(DestReg) && + SystemZ::FP128BitRegClass.contains(SrcReg)) { + unsigned SrcRegHi = + RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64), + SystemZ::subreg_r64, &SystemZ::VR128BitRegClass); + unsigned SrcRegLo = + RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64), + SystemZ::subreg_r64, &SystemZ::VR128BitRegClass); + + BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg) + .addReg(SrcRegHi, getKillRegState(KillSrc)) + .addReg(SrcRegLo, getKillRegState(KillSrc)); + return; + } + if (SystemZ::FP128BitRegClass.contains(DestReg) && + SystemZ::VR128BitRegClass.contains(SrcReg)) { + unsigned DestRegHi = + RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64), + SystemZ::subreg_r64, &SystemZ::VR128BitRegClass); + unsigned DestRegLo = + RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64), + SystemZ::subreg_r64, &SystemZ::VR128BitRegClass); + + if (DestRegHi != SrcReg) + copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false); + BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo) + .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1); return; } @@ -888,15 +971,19 @@ static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) { } namespace { + struct LogicOp { - LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {} + LogicOp() = default; LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize) : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {} explicit operator bool() const { return RegSize; } - unsigned RegSize, ImmLSB, ImmSize; + unsigned RegSize = 0; + unsigned ImmLSB = 0; + unsigned ImmSize = 0; }; + } // end anonymous namespace static LogicOp interpretAndImmediate(unsigned Opcode) { @@ -976,12 +1063,12 @@ MachineInstr *SystemZInstrInfo::convertToThreeAddress( MachineInstrBuilder MIB( *MF, MF->CreateMachineInstr(get(ThreeOperandOpcode), MI.getDebugLoc(), /*NoImplicit=*/true)); - MIB.addOperand(Dest); + MIB.add(Dest); // Keep the kill state, but drop the tied flag. MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg()); // Keep the remaining operands as-is. 
for (unsigned I = 2; I < NumOps; ++I) - MIB.addOperand(MI.getOperand(I)); + MIB.add(MI.getOperand(I)); MBB->insert(MI, MIB); return finishConvertToThreeAddress(&MI, MIB, LV); } @@ -1009,7 +1096,7 @@ MachineInstr *SystemZInstrInfo::convertToThreeAddress( MachineOperand &Src = MI.getOperand(1); MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode)) - .addOperand(Dest) + .add(Dest) .addReg(0) .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg()) @@ -1040,7 +1127,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl( MCRegUnitIterator CCUnit(SystemZ::CC, TRI); LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit); ++CCUnit; - assert (!CCUnit.isValid() && "CC only has one reg unit."); + assert(!CCUnit.isValid() && "CC only has one reg unit."); SlotIndex MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot(); if (!CCLiveRange.liveAt(MISlot)) { @@ -1063,10 +1150,9 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl( return nullptr; unsigned OpNum = Ops[0]; - assert(Size == - MF.getRegInfo() - .getRegClass(MI.getOperand(OpNum).getReg()) - ->getSize() && + assert(Size * 8 == + TRI->getRegSizeInBits(*MF.getRegInfo() + .getRegClass(MI.getOperand(OpNum).getReg())) && "Invalid size combination"); if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 && @@ -1091,7 +1177,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl( unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD; return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(StoreOpcode)) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addFrameIndex(FrameIndex) .addImm(0) .addReg(0); @@ -1100,12 +1186,12 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl( // destination register instead. if (OpNum == 1) { unsigned LoadOpcode = Op0IsGPR ? 
SystemZ::LG : SystemZ::LD; - unsigned Dest = MI.getOperand(0).getReg(); return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), - get(LoadOpcode), Dest) - .addFrameIndex(FrameIndex) - .addImm(0) - .addReg(0); + get(LoadOpcode)) + .add(MI.getOperand(0)) + .addFrameIndex(FrameIndex) + .addImm(0) + .addReg(0); } } @@ -1132,7 +1218,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl( .addFrameIndex(FrameIndex) .addImm(0) .addImm(Size) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addImm(MI.getOperand(2).getImm()) .addMemOperand(MMO); } @@ -1140,7 +1226,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl( if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) { return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(SystemZ::MVC)) - .addOperand(MI.getOperand(1)) + .add(MI.getOperand(1)) .addImm(MI.getOperand(2).getImm()) .addImm(Size) .addFrameIndex(FrameIndex) @@ -1164,7 +1250,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl( MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(MemOpcode)); for (unsigned I = 0; I < OpNum; ++I) - MIB.addOperand(MI.getOperand(I)); + MIB.add(MI.getOperand(I)); MIB.addFrameIndex(FrameIndex).addImm(Offset); if (MemDesc.TSFlags & SystemZII::HasIndex) MIB.addReg(0); @@ -1379,6 +1465,7 @@ SystemZII::Branch SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const { switch (MI.getOpcode()) { case SystemZ::BR: + case SystemZ::BI: case SystemZ::J: case SystemZ::JG: return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY, diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h index 794b193..b8be1f5 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h @@ -16,16 +16,22 @@ #include "SystemZ.h" #include "SystemZRegisterInfo.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/Target/TargetInstrInfo.h" +#include <cstdint> #define GET_INSTRINFO_HEADER #include "SystemZGenInstrInfo.inc" namespace llvm { -class SystemZTargetMachine; +class SystemZSubtarget; namespace SystemZII { + enum { // See comments in SystemZInstrFormats.td. SimpleBDXLoad = (1 << 0), @@ -43,12 +49,15 @@ enum { CCMaskLast = (1 << 19), IsLogical = (1 << 20) }; + static inline unsigned getAccessSize(unsigned int Flags) { return (Flags & AccessSizeMask) >> AccessSizeShift; } + static inline unsigned getCCValues(unsigned int Flags) { return (Flags & CCValuesMask) >> CCValuesShift; } + static inline unsigned getCompareZeroCCMask(unsigned int Flags) { return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift; } @@ -64,6 +73,7 @@ enum { // @INDNTPOFF MO_INDNTPOFF = (2 << 0) }; + // Classifies a branch. enum BranchType { // An instruction that branches on the current value of CC. @@ -93,6 +103,7 @@ enum BranchType { // the result is nonzero. BranchCTG }; + // Information about a branch instruction. struct Branch { // The type of the branch. @@ -111,6 +122,7 @@ struct Branch { const MachineOperand *target) : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {} }; + // Kinds of fused compares in compare-and-* instructions. Together with type // of the converted compare, this identifies the compare-and-* // instruction. 
@@ -127,9 +139,9 @@ enum FusedCompareType { // Trap CompareAndTrap }; + } // end namespace SystemZII -class SystemZSubtarget; class SystemZInstrInfo : public SystemZGenInstrInfo { const SystemZRegisterInfo RI; SystemZSubtarget &STI; @@ -149,9 +161,13 @@ class SystemZInstrInfo : public SystemZGenInstrInfo { void expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode, unsigned Size) const; void expandLoadStackGuard(MachineInstr *MI) const; - void emitGRX32Move(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, - const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, - unsigned LowLowOpcode, unsigned Size, bool KillSrc) const; + + MachineInstrBuilder + emitGRX32Move(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, + const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, + unsigned LowLowOpcode, unsigned Size, bool KillSrc, + bool UndefSrc) const; + virtual void anchor(); protected: @@ -203,7 +219,7 @@ public: unsigned FalseReg) const override; bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg, MachineRegisterInfo *MRI) const override; - bool isPredicable(MachineInstr &MI) const override; + bool isPredicable(const MachineInstr &MI) const override; bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override; @@ -304,6 +320,7 @@ public: areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA = nullptr) const override; }; + } // end namespace llvm -#endif +#endif // LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZINSTRINFO_H diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td index d63525f..f64c0d1 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td @@ -12,8 +12,8 @@ //===----------------------------------------------------------------------===// let hasNoSchedulingInfo = 1 in { - def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt), - [(callseq_start timm:$amt)]>; + def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2), + [(callseq_start timm:$amt1, timm:$amt2)]>; def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2), [(callseq_end timm:$amt1, timm:$amt2)]>; } @@ -48,6 +48,8 @@ let isBranch = 1, isTerminator = 1, Uses = [CC] in { let isIndirectBranch = 1 in { def BC : CondBranchRX<"b#", 0x47>; def BCR : CondBranchRR<"b#r", 0x07>; + def BIC : CondBranchRXY<"bi#", 0xe347>, + Requires<[FeatureMiscellaneousExtensions2]>; } } @@ -58,6 +60,8 @@ let isBranch = 1, isTerminator = 1, Uses = [CC] in { let isIndirectBranch = 1 in { def BCAsm : AsmCondBranchRX<"bc", 0x47>; def BCRAsm : AsmCondBranchRR<"bcr", 0x07>; + def BICAsm : AsmCondBranchRXY<"bic", 0xe347>, + Requires<[FeatureMiscellaneousExtensions2]>; } // Define AsmParser extended mnemonics for each general condition-code mask @@ -69,6 +73,8 @@ let isBranch = 1, isTerminator = 1, Uses = [CC] in { let isIndirectBranch = 1 in { def BAsm#V : FixedCondBranchRX <CV<V>, "b#", 0x47>; def BRAsm#V : FixedCondBranchRR <CV<V>, "b#r", 0x07>; + def BIAsm#V : FixedCondBranchRXY<CV<V>, "bi#", 0xe347>, + Requires<[FeatureMiscellaneousExtensions2]>; } } } @@ -81,6 +87,8 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1 in { let isIndirectBranch = 1 in { def B : FixedCondBranchRX<CondAlways, "b", 0x47>; def BR : FixedCondBranchRR<CondAlways, "br", 0x07, brind>; + def BI : FixedCondBranchRXY<CondAlways, "bi", 0xe347, brind>, + 
Requires<[FeatureMiscellaneousExtensions2]>; } } @@ -189,18 +197,15 @@ let isBranch = 1, isTerminator = 1 in { //===----------------------------------------------------------------------===// // Unconditional trap. -// FIXME: This trap instruction should be marked as isTerminator, but there is -// currently a general bug that allows non-terminators to be placed between -// terminators. Temporarily leave this unmarked until the bug is fixed. -let isBarrier = 1, hasCtrlDep = 1 in +let hasCtrlDep = 1 in def Trap : Alias<4, (outs), (ins), [(trap)]>; // Conditional trap. -let isTerminator = 1, hasCtrlDep = 1, Uses = [CC] in +let hasCtrlDep = 1, Uses = [CC] in def CondTrap : Alias<4, (outs), (ins cond4:$valid, cond4:$R1), []>; // Fused compare-and-trap instructions. -let isTerminator = 1, hasCtrlDep = 1 in { +let hasCtrlDep = 1 in { // These patterns work the same way as for compare-and-branch. defm CRT : CmpBranchRRFcPair<"crt", 0xB972, GR32>; defm CGRT : CmpBranchRRFcPair<"cgrt", 0xB960, GR64>; @@ -319,9 +324,9 @@ let isReturn = 1, isTerminator = 1, hasCtrlDep = 1 in { // Select instructions //===----------------------------------------------------------------------===// -def Select32Mux : SelectWrapper<GRX32>, Requires<[FeatureHighWord]>; -def Select32 : SelectWrapper<GR32>; -def Select64 : SelectWrapper<GR64>; +def Select32Mux : SelectWrapper<i32, GRX32>, Requires<[FeatureHighWord]>; +def Select32 : SelectWrapper<i32, GR32>; +def Select64 : SelectWrapper<i64, GR64>; // We don't define 32-bit Mux stores if we don't have STOCFH, because the // low-only STOC should then always be used if possible. @@ -464,6 +469,11 @@ def MVGHI : StoreSIL<"mvghi", 0xE548, store, imm64sx16>; // Memory-to-memory moves. let mayLoad = 1, mayStore = 1 in defm MVC : MemorySS<"mvc", 0xD2, z_mvc, z_mvc_loop>; +let mayLoad = 1, mayStore = 1, Defs = [CC] in { + def MVCL : SideEffectBinaryMemMemRR<"mvcl", 0x0E, GR128, GR128>; + def MVCLE : SideEffectTernaryMemMemRS<"mvcle", 0xA8, GR128, GR128>; + def MVCLU : SideEffectTernaryMemMemRSY<"mvclu", 0xEB8E, GR128, GR128>; +} // String moves. let mayLoad = 1, mayStore = 1, Defs = [CC] in @@ -675,6 +685,22 @@ let Predicates = [FeatureLoadAndTrap] in { def LLGTAT : UnaryRXY<"llgtat", 0xE39C, null_frag, GR64, 4>; } +// Extend GR64s to GR128s. +let usesCustomInserter = 1 in + def ZEXT128 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>; + +//===----------------------------------------------------------------------===// +// "Any" extensions +//===----------------------------------------------------------------------===// + +// Use subregs to populate the "don't care" bits in a 32-bit to 64-bit anyext. +def : Pat<(i64 (anyext GR32:$src)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32)>; + +// Extend GR64s to GR128s. +let usesCustomInserter = 1 in + def AEXT128 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>; + //===----------------------------------------------------------------------===// // Truncations //===----------------------------------------------------------------------===// @@ -707,6 +733,10 @@ def : StoreGR64PC<STHRL, aligned_truncstorei16>; defm : StoreGR64Pair<ST, STY, truncstorei32>; def : StoreGR64PC<STRL, aligned_truncstorei32>; +// Store characters under mask -- not (yet) used for codegen. 
+defm STCM : StoreBinaryRSPair<"stcm", 0xBE, 0xEB2D, GR32, 0>; +def STCMH : StoreBinaryRSY<"stcmh", 0xEB2C, GRH32, 0>; + //===----------------------------------------------------------------------===// // Multi-register moves //===----------------------------------------------------------------------===// @@ -715,6 +745,7 @@ def : StoreGR64PC<STRL, aligned_truncstorei32>; defm LM : LoadMultipleRSPair<"lm", 0x98, 0xEB98, GR32>; def LMG : LoadMultipleRSY<"lmg", 0xEB04, GR64>; def LMH : LoadMultipleRSY<"lmh", 0xEB96, GRH32>; +def LMD : LoadMultipleSSe<"lmd", 0xEF, GR64>; // Multi-register stores. defm STM : StoreMultipleRSPair<"stm", 0x90, 0xEB90, GR32>; @@ -742,6 +773,10 @@ def STRVH : StoreRXY<"strvh", 0xE33F, z_strvh, GR32, 2>; def STRV : StoreRXY<"strv", 0xE33E, z_strv, GR32, 4>; def STRVG : StoreRXY<"strvg", 0xE32F, z_strvg, GR64, 8>; +// Byte-swapping memory-to-memory moves. +let mayLoad = 1, mayStore = 1 in + def MVCIN : SideEffectBinarySSa<"mvcin", 0xE8>; + //===----------------------------------------------------------------------===// // Load address instructions //===----------------------------------------------------------------------===// @@ -816,6 +851,7 @@ defm : InsertMem<"inserti8", IC32Y, GR32, azextloadi8, bdxaddr20pair>; defm : InsertMem<"inserti8", IC, GR64, azextloadi8, bdxaddr12pair>; defm : InsertMem<"inserti8", ICY, GR64, azextloadi8, bdxaddr20pair>; +// Insert characters under mask -- not (yet) used for codegen. let Defs = [CC] in { defm ICM : TernaryRSPair<"icm", 0xBF, 0xEB81, GR32, 0>; def ICMH : TernaryRSY<"icmh", 0xEB80, GRH32, 0>; @@ -871,6 +907,12 @@ let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in { } def AGFR : BinaryRRE<"agfr", 0xB918, null_frag, GR64, GR32>; + // Addition to a high register. + def AHHHR : BinaryRRFa<"ahhhr", 0xB9C8, null_frag, GRH32, GRH32, GRH32>, + Requires<[FeatureHighWord]>; + def AHHLR : BinaryRRFa<"ahhlr", 0xB9D8, null_frag, GRH32, GRH32, GR32>, + Requires<[FeatureHighWord]>; + // Addition of signed 16-bit immediates. defm AHIMux : BinaryRIAndKPseudo<"ahimux", add, GRX32, imm32sx16>; defm AHI : BinaryRIAndK<"ahi", 0xA7A, 0xECD8, add, GR32, imm32sx16>; @@ -887,6 +929,8 @@ let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in { // Addition of memory. defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, asextloadi16, 2>; defm A : BinaryRXPair<"a", 0x5A, 0xE35A, add, GR32, load, 4>; + def AGH : BinaryRXY<"agh", 0xE338, add, GR64, asextloadi16, 2>, + Requires<[FeatureMiscellaneousExtensions2]>; def AGF : BinaryRXY<"agf", 0xE318, add, GR64, asextloadi32, 4>; def AG : BinaryRXY<"ag", 0xE308, add, GR64, load, 8>; @@ -905,6 +949,12 @@ let Defs = [CC] in { } def ALGFR : BinaryRRE<"algfr", 0xB91A, null_frag, GR64, GR32>; + // Addition to a high register. + def ALHHHR : BinaryRRFa<"alhhhr", 0xB9CA, null_frag, GRH32, GRH32, GRH32>, + Requires<[FeatureHighWord]>; + def ALHHLR : BinaryRRFa<"alhhlr", 0xB9DA, null_frag, GRH32, GRH32, GR32>, + Requires<[FeatureHighWord]>; + // Addition of signed 16-bit immediates. def ALHSIK : BinaryRIE<"alhsik", 0xECDA, addc, GR32, imm32sx16>, Requires<[FeatureDistinctOps]>; @@ -915,10 +965,18 @@ let Defs = [CC] in { def ALFI : BinaryRIL<"alfi", 0xC2B, addc, GR32, uimm32>; def ALGFI : BinaryRIL<"algfi", 0xC2A, addc, GR64, imm64zx32>; + // Addition of signed 32-bit immediates. + def ALSIH : BinaryRIL<"alsih", 0xCCA, null_frag, GRH32, simm32>, + Requires<[FeatureHighWord]>; + // Addition of memory. 
defm AL : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load, 4>; def ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, azextloadi32, 4>; def ALG : BinaryRXY<"alg", 0xE30A, addc, GR64, load, 8>; + + // Addition to memory. + def ALSI : BinarySIY<"alsi", 0xEB6E, null_frag, imm32sx8>; + def ALGSI : BinarySIY<"algsi", 0xEB7E, null_frag, imm64sx8>; } defm : ZXB<addc, GR64, ALGFR>; @@ -933,6 +991,10 @@ let Defs = [CC], Uses = [CC] in { def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load, 8>; } +// Addition that does not modify the condition code. +def ALSIHN : BinaryRIL<"alsihn", 0xCCB, null_frag, GRH32, simm32>, + Requires<[FeatureHighWord]>; + //===----------------------------------------------------------------------===// // Subtraction //===----------------------------------------------------------------------===// @@ -945,9 +1007,17 @@ let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in { def SGFR : BinaryRRE<"sgfr", 0xB919, null_frag, GR64, GR32>; defm SGR : BinaryRREAndK<"sgr", 0xB909, 0xB9E9, sub, GR64, GR64>; + // Subtraction from a high register. + def SHHHR : BinaryRRFa<"shhhr", 0xB9C9, null_frag, GRH32, GRH32, GRH32>, + Requires<[FeatureHighWord]>; + def SHHLR : BinaryRRFa<"shhlr", 0xB9D9, null_frag, GRH32, GRH32, GR32>, + Requires<[FeatureHighWord]>; + // Subtraction of memory. defm SH : BinaryRXPair<"sh", 0x4B, 0xE37B, sub, GR32, asextloadi16, 2>; defm S : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load, 4>; + def SGH : BinaryRXY<"sgh", 0xE339, sub, GR64, asextloadi16, 2>, + Requires<[FeatureMiscellaneousExtensions2]>; def SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, asextloadi32, 4>; def SG : BinaryRXY<"sg", 0xE309, sub, GR64, load, 8>; } @@ -960,6 +1030,12 @@ let Defs = [CC] in { def SLGFR : BinaryRRE<"slgfr", 0xB91B, null_frag, GR64, GR32>; defm SLGR : BinaryRREAndK<"slgr", 0xB90B, 0xB9EB, subc, GR64, GR64>; + // Subtraction from a high register. + def SLHHHR : BinaryRRFa<"slhhhr", 0xB9CB, null_frag, GRH32, GRH32, GRH32>, + Requires<[FeatureHighWord]>; + def SLHHLR : BinaryRRFa<"slhhlr", 0xB9DB, null_frag, GRH32, GRH32, GR32>, + Requires<[FeatureHighWord]>; + // Subtraction of unsigned 32-bit immediates. These don't match // subc because we prefer addc for constants. def SLFI : BinaryRIL<"slfi", 0xC25, null_frag, GR32, uimm32>; @@ -1143,6 +1219,15 @@ defm : RMWIByte<xor, bdaddr20pair, XIY>; // Multiplication //===----------------------------------------------------------------------===// +// Multiplication of a register, setting the condition code. We prefer these +// over MS(G)R if available, even though we cannot use the condition code, +// since they are three-operand instructions. +let Predicates = [FeatureMiscellaneousExtensions2], + Defs = [CC], isCommutable = 1 in { + def MSRKC : BinaryRRFa<"msrkc", 0xB9FD, mul, GR32, GR32, GR32>; + def MSGRKC : BinaryRRFa<"msgrkc", 0xB9ED, mul, GR64, GR64, GR64>; +} + // Multiplication of a register. let isCommutable = 1 in { def MSR : BinaryRRE<"msr", 0xB252, mul, GR32, GR32>; @@ -1162,14 +1247,39 @@ def MSGFI : BinaryRIL<"msgfi", 0xC20, mul, GR64, imm64sx32>; // Multiplication of memory. defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, asextloadi16, 2>; defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load, 4>; +def MGH : BinaryRXY<"mgh", 0xE33C, mul, GR64, asextloadi16, 2>, + Requires<[FeatureMiscellaneousExtensions2]>; def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, asextloadi32, 4>; def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load, 8>; +// Multiplication of memory, setting the condition code. 
+let Predicates = [FeatureMiscellaneousExtensions2], Defs = [CC] in { + def MSC : BinaryRXY<"msc", 0xE353, null_frag, GR32, load, 4>; + def MSGC : BinaryRXY<"msgc", 0xE383, null_frag, GR64, load, 8>; +} + // Multiplication of a register, producing two results. -def MLGR : BinaryRRE<"mlgr", 0xB986, z_umul_lohi64, GR128, GR64>; +def MR : BinaryRR <"mr", 0x1C, null_frag, GR128, GR32>; +def MGRK : BinaryRRFa<"mgrk", 0xB9EC, null_frag, GR128, GR64, GR64>, + Requires<[FeatureMiscellaneousExtensions2]>; +def MLR : BinaryRRE<"mlr", 0xB996, null_frag, GR128, GR32>; +def MLGR : BinaryRRE<"mlgr", 0xB986, null_frag, GR128, GR64>; +def : Pat<(z_smul_lohi GR64:$src1, GR64:$src2), + (MGRK GR64:$src1, GR64:$src2)>; +def : Pat<(z_umul_lohi GR64:$src1, GR64:$src2), + (MLGR (AEXT128 GR64:$src1), GR64:$src2)>; // Multiplication of memory, producing two results. -def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load, 8>; +def M : BinaryRX <"m", 0x5C, null_frag, GR128, load, 4>; +def MFY : BinaryRXY<"mfy", 0xE35C, null_frag, GR128, load, 4>; +def MG : BinaryRXY<"mg", 0xE384, null_frag, GR128, load, 8>, + Requires<[FeatureMiscellaneousExtensions2]>; +def ML : BinaryRXY<"ml", 0xE396, null_frag, GR128, load, 4>; +def MLG : BinaryRXY<"mlg", 0xE386, null_frag, GR128, load, 8>; +def : Pat<(z_smul_lohi GR64:$src1, (i64 (load bdxaddr20only:$src2))), + (MG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>; +def : Pat<(z_umul_lohi GR64:$src1, (i64 (load bdxaddr20only:$src2))), + (MLG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>; //===----------------------------------------------------------------------===// // Division and remainder @@ -1177,39 +1287,69 @@ def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load, 8>; let hasSideEffects = 1 in { // Do not speculatively execute. // Division and remainder, from registers. - def DSGFR : BinaryRRE<"dsgfr", 0xB91D, z_sdivrem32, GR128, GR32>; - def DSGR : BinaryRRE<"dsgr", 0xB90D, z_sdivrem64, GR128, GR64>; - def DLR : BinaryRRE<"dlr", 0xB997, z_udivrem32, GR128, GR32>; - def DLGR : BinaryRRE<"dlgr", 0xB987, z_udivrem64, GR128, GR64>; + def DR : BinaryRR <"dr", 0x1D, null_frag, GR128, GR32>; + def DSGFR : BinaryRRE<"dsgfr", 0xB91D, null_frag, GR128, GR32>; + def DSGR : BinaryRRE<"dsgr", 0xB90D, null_frag, GR128, GR64>; + def DLR : BinaryRRE<"dlr", 0xB997, null_frag, GR128, GR32>; + def DLGR : BinaryRRE<"dlgr", 0xB987, null_frag, GR128, GR64>; // Division and remainder, from memory. 
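// A sketch (illustration only, not part of the patch) of what the division
// patterns further below produce for ordinary C-level division.  For
//
//   unsigned long quot = a / b;   unsigned long rem = a % b;
//
// the dividend is first widened into an even/odd GR128 pair by the ZEXT128
// pseudo and then divided with DLGR, which leaves the remainder in the even
// register and the quotient in the odd register of the pair.  The signed
// patterns use AEXT128 together with DSGR/DSGFR and read the two results
// from the same register pair.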
- def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem32, GR128, load, 4>; - def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load, 8>; - def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load, 4>; - def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>; -} + def D : BinaryRX <"d", 0x5D, null_frag, GR128, load, 4>; + def DSGF : BinaryRXY<"dsgf", 0xE31D, null_frag, GR128, load, 4>; + def DSG : BinaryRXY<"dsg", 0xE30D, null_frag, GR128, load, 8>; + def DL : BinaryRXY<"dl", 0xE397, null_frag, GR128, load, 4>; + def DLG : BinaryRXY<"dlg", 0xE387, null_frag, GR128, load, 8>; +} +def : Pat<(z_sdivrem GR64:$src1, GR32:$src2), + (DSGFR (AEXT128 GR64:$src1), GR32:$src2)>; +def : Pat<(z_sdivrem GR64:$src1, (i32 (load bdxaddr20only:$src2))), + (DSGF (AEXT128 GR64:$src1), bdxaddr20only:$src2)>; +def : Pat<(z_sdivrem GR64:$src1, GR64:$src2), + (DSGR (AEXT128 GR64:$src1), GR64:$src2)>; +def : Pat<(z_sdivrem GR64:$src1, (i64 (load bdxaddr20only:$src2))), + (DSG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>; + +def : Pat<(z_udivrem GR32:$src1, GR32:$src2), + (DLR (ZEXT128 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src1, + subreg_l32)), GR32:$src2)>; +def : Pat<(z_udivrem GR32:$src1, (i32 (load bdxaddr20only:$src2))), + (DL (ZEXT128 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src1, + subreg_l32)), bdxaddr20only:$src2)>; +def : Pat<(z_udivrem GR64:$src1, GR64:$src2), + (DLGR (ZEXT128 GR64:$src1), GR64:$src2)>; +def : Pat<(z_udivrem GR64:$src1, (i64 (load bdxaddr20only:$src2))), + (DLG (ZEXT128 GR64:$src1), bdxaddr20only:$src2)>; //===----------------------------------------------------------------------===// // Shifts //===----------------------------------------------------------------------===// -// Shift left. +// Logical shift left. let hasSideEffects = 0 in { defm SLL : BinaryRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>; - defm SLA : BinaryRSAndK<"sla", 0x8B, 0xEBDD, null_frag, GR32>; def SLLG : BinaryRSY<"sllg", 0xEB0D, shl, GR64>; + def SLDL : BinaryRS<"sldl", 0x8D, null_frag, GR128>; +} + +// Arithmetic shift left. +let Defs = [CC] in { + defm SLA : BinaryRSAndK<"sla", 0x8B, 0xEBDD, null_frag, GR32>; + def SLAG : BinaryRSY<"slag", 0xEB0B, null_frag, GR64>; + def SLDA : BinaryRS<"slda", 0x8F, null_frag, GR128>; } // Logical shift right. let hasSideEffects = 0 in { defm SRL : BinaryRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>; def SRLG : BinaryRSY<"srlg", 0xEB0C, srl, GR64>; + def SRDL : BinaryRS<"srdl", 0x8C, null_frag, GR128>; } // Arithmetic shift right. let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in { defm SRA : BinaryRSAndK<"sra", 0x8A, 0xEBDC, sra, GR32>; def SRAG : BinaryRSY<"srag", 0xEB0A, sra, GR64>; + def SRDA : BinaryRS<"srda", 0x8E, null_frag, GR128>; } // Rotate left. @@ -1266,6 +1406,12 @@ let Defs = [CC], CCValues = 0xE in { def CGFR : CompareRRE<"cgfr", 0xB930, null_frag, GR64, GR32>; def CGR : CompareRRE<"cgr", 0xB920, z_scmp, GR64, GR64>; + // Comparison with a high register. + def CHHR : CompareRRE<"chhr", 0xB9CD, null_frag, GRH32, GRH32>, + Requires<[FeatureHighWord]>; + def CHLR : CompareRRE<"chlr", 0xB9DD, null_frag, GRH32, GR32>, + Requires<[FeatureHighWord]>; + // Comparison with a signed 16-bit immediate. CHIMux expands to CHI or CIH, // depending on the choice of register. def CHIMux : CompareRIPseudo<z_scmp, GRX32, imm32sx16>, @@ -1312,6 +1458,12 @@ let Defs = [CC], CCValues = 0xE, IsLogical = 1 in { def CLGFR : CompareRRE<"clgfr", 0xB931, null_frag, GR64, GR32>; def CLGR : CompareRRE<"clgr", 0xB921, z_ucmp, GR64, GR64>; + // Comparison with a high register. 
+ def CLHHR : CompareRRE<"clhhr", 0xB9CF, null_frag, GRH32, GRH32>, + Requires<[FeatureHighWord]>; + def CLHLR : CompareRRE<"clhlr", 0xB9DF, null_frag, GRH32, GR32>, + Requires<[FeatureHighWord]>; + // Comparison with an unsigned 32-bit immediate. CLFIMux expands to CLFI // or CLIH, depending on the choice of register. def CLFIMux : CompareRIPseudo<z_ucmp, GRX32, uimm32>, @@ -1351,8 +1503,12 @@ let Defs = [CC], CCValues = 0xE, IsLogical = 1 in { defm : ZXB<z_ucmp, GR64, CLGFR>; // Memory-to-memory comparison. -let mayLoad = 1, Defs = [CC] in +let mayLoad = 1, Defs = [CC] in { defm CLC : MemorySS<"clc", 0xD5, z_clc, z_clc_loop>; + def CLCL : SideEffectBinaryMemMemRR<"clcl", 0x0F, GR128, GR128>; + def CLCLE : SideEffectTernaryMemMemRS<"clcle", 0xA9, GR128, GR128>; + def CLCLU : SideEffectTernaryMemMemRSY<"clclu", 0xEB8F, GR128, GR128>; +} // String comparison. let mayLoad = 1, Defs = [CC] in @@ -1381,6 +1537,12 @@ let Defs = [CC] in { def TML : InstAlias<"tml\t$R, $I", (TMLL GR32:$R, imm32ll16:$I), 0>; def TMH : InstAlias<"tmh\t$R, $I", (TMLH GR32:$R, imm32lh16:$I), 0>; +// Compare logical characters under mask -- not (yet) used for codegen. +let Defs = [CC] in { + defm CLM : CompareRSPair<"clm", 0xBD, 0xEB21, GR32, 0>; + def CLMH : CompareRSY<"clmh", 0xEB20, GRH32, 0>; +} + //===----------------------------------------------------------------------===// // Prefetch and execution hint //===----------------------------------------------------------------------===// @@ -1404,7 +1566,7 @@ let Predicates = [FeatureExecutionHint] in { // A serialization instruction that acts as a barrier for all memory // accesses, which expands to "bcr 14, 0". let hasSideEffects = 1 in -def Serialize : Alias<2, (outs), (ins), [(z_serialize)]>; +def Serialize : Alias<2, (outs), (ins), []>; // A pseudo instruction that serves as a compiler barrier. 
let hasSideEffects = 1, hasNoSchedulingInfo = 1 in @@ -1581,6 +1743,136 @@ let Predicates = [FeatureInterlockedAccess1], Defs = [CC] in { } //===----------------------------------------------------------------------===// +// Translate and convert +//===----------------------------------------------------------------------===// + +let mayLoad = 1, mayStore = 1 in + def TR : SideEffectBinarySSa<"tr", 0xDC>; + +let mayLoad = 1, Defs = [CC, R0L, R1D] in { + def TRT : SideEffectBinarySSa<"trt", 0xDD>; + def TRTR : SideEffectBinarySSa<"trtr", 0xD0>; +} + +let mayLoad = 1, mayStore = 1, Uses = [R0L] in + def TRE : SideEffectBinaryMemMemRRE<"tre", 0xB2A5, GR128, GR64>; + +let mayLoad = 1, Uses = [R1D], Defs = [CC] in { + defm TRTE : BinaryMemRRFcOpt<"trte", 0xB9BF, GR128, GR64>; + defm TRTRE : BinaryMemRRFcOpt<"trtre", 0xB9BD, GR128, GR64>; +} + +let mayLoad = 1, mayStore = 1, Uses = [R0L, R1D], Defs = [CC] in { + defm TROO : SideEffectTernaryMemMemRRFcOpt<"troo", 0xB993, GR128, GR64>; + defm TROT : SideEffectTernaryMemMemRRFcOpt<"trot", 0xB992, GR128, GR64>; + defm TRTO : SideEffectTernaryMemMemRRFcOpt<"trto", 0xB991, GR128, GR64>; + defm TRTT : SideEffectTernaryMemMemRRFcOpt<"trtt", 0xB990, GR128, GR64>; +} + +let mayLoad = 1, mayStore = 1, Defs = [CC] in { + defm CU12 : SideEffectTernaryMemMemRRFcOpt<"cu12", 0xB2A7, GR128, GR128>; + defm CU14 : SideEffectTernaryMemMemRRFcOpt<"cu14", 0xB9B0, GR128, GR128>; + defm CU21 : SideEffectTernaryMemMemRRFcOpt<"cu21", 0xB2A6, GR128, GR128>; + defm CU24 : SideEffectTernaryMemMemRRFcOpt<"cu24", 0xB9B1, GR128, GR128>; + def CU41 : SideEffectBinaryMemMemRRE<"cu41", 0xB9B2, GR128, GR128>; + def CU42 : SideEffectBinaryMemMemRRE<"cu42", 0xB9B3, GR128, GR128>; + + let isAsmParserOnly = 1 in { + defm CUUTF : SideEffectTernaryMemMemRRFcOpt<"cuutf", 0xB2A6, GR128, GR128>; + defm CUTFU : SideEffectTernaryMemMemRRFcOpt<"cutfu", 0xB2A7, GR128, GR128>; + } +} + +//===----------------------------------------------------------------------===// +// Message-security assist +//===----------------------------------------------------------------------===// + +let mayLoad = 1, mayStore = 1, Uses = [R0L, R1D], Defs = [CC] in { + def KM : SideEffectBinaryMemMemRRE<"km", 0xB92E, GR128, GR128>; + def KMC : SideEffectBinaryMemMemRRE<"kmc", 0xB92F, GR128, GR128>; + + def KIMD : SideEffectBinaryMemRRE<"kimd", 0xB93E, GR64, GR128>; + def KLMD : SideEffectBinaryMemRRE<"klmd", 0xB93F, GR64, GR128>; + def KMAC : SideEffectBinaryMemRRE<"kmac", 0xB91E, GR64, GR128>; + + let Predicates = [FeatureMessageSecurityAssist4] in { + def KMF : SideEffectBinaryMemMemRRE<"kmf", 0xB92A, GR128, GR128>; + def KMO : SideEffectBinaryMemMemRRE<"kmo", 0xB92B, GR128, GR128>; + def KMCTR : SideEffectTernaryMemMemMemRRFb<"kmctr", 0xB92D, + GR128, GR128, GR128>; + def PCC : SideEffectInherentRRE<"pcc", 0xB92C>; + } + + let Predicates = [FeatureMessageSecurityAssist5] in + def PPNO : SideEffectBinaryMemMemRRE<"ppno", 0xB93C, GR128, GR128>; + let Predicates = [FeatureMessageSecurityAssist7], isAsmParserOnly = 1 in + def PRNO : SideEffectBinaryMemMemRRE<"prno", 0xB93C, GR128, GR128>; + + let Predicates = [FeatureMessageSecurityAssist8] in + def KMA : SideEffectTernaryMemMemMemRRFb<"kma", 0xB929, + GR128, GR128, GR128>; +} + +//===----------------------------------------------------------------------===// +// Guarded storage +//===----------------------------------------------------------------------===// + +let Predicates = [FeatureGuardedStorage] in { + def LGG : UnaryRXY<"lgg", 0xE34C, null_frag, GR64, 8>; + def 
LLGFSG : UnaryRXY<"llgfsg", 0xE348, null_frag, GR64, 4>; + + let mayLoad = 1 in + def LGSC : SideEffectBinaryRXY<"lgsc", 0xE34D, GR64>; + let mayStore = 1 in + def STGSC : SideEffectBinaryRXY<"stgsc", 0xE349, GR64>; +} + +//===----------------------------------------------------------------------===// +// Decimal arithmetic +//===----------------------------------------------------------------------===// + +defm CVB : BinaryRXPair<"cvb",0x4F, 0xE306, null_frag, GR32, load, 4>; +def CVBG : BinaryRXY<"cvbg", 0xE30E, null_frag, GR64, load, 8>; + +defm CVD : StoreRXPair<"cvd", 0x4E, 0xE326, null_frag, GR32, 4>; +def CVDG : StoreRXY<"cvdg", 0xE32E, null_frag, GR64, 8>; + +let mayLoad = 1, mayStore = 1 in { + def MVN : SideEffectBinarySSa<"mvn", 0xD1>; + def MVZ : SideEffectBinarySSa<"mvz", 0xD3>; + def MVO : SideEffectBinarySSb<"mvo", 0xF1>; + + def PACK : SideEffectBinarySSb<"pack", 0xF2>; + def PKA : SideEffectBinarySSf<"pka", 0xE9>; + def PKU : SideEffectBinarySSf<"pku", 0xE1>; + def UNPK : SideEffectBinarySSb<"unpk", 0xF3>; + let Defs = [CC] in { + def UNPKA : SideEffectBinarySSa<"unpka", 0xEA>; + def UNPKU : SideEffectBinarySSa<"unpku", 0xE2>; + } +} + +let mayLoad = 1, mayStore = 1 in { + let Defs = [CC] in { + def AP : SideEffectBinarySSb<"ap", 0xFA>; + def SP : SideEffectBinarySSb<"sp", 0xFB>; + def ZAP : SideEffectBinarySSb<"zap", 0xF8>; + def SRP : SideEffectTernarySSc<"srp", 0xF0>; + } + def MP : SideEffectBinarySSb<"mp", 0xFC>; + def DP : SideEffectBinarySSb<"dp", 0xFD>; + let Defs = [CC] in { + def ED : SideEffectBinarySSa<"ed", 0xDE>; + def EDMK : SideEffectBinarySSa<"edmk", 0xDF>; + } +} + +let Defs = [CC] in { + def CP : CompareSSb<"cp", 0xF9>; + def TP : TestRSL<"tp", 0xEBC0>; +} + +//===----------------------------------------------------------------------===// // Access registers //===----------------------------------------------------------------------===// @@ -1699,39 +1991,32 @@ def : Pat<(ctlz GR64:$src), let Predicates = [FeaturePopulationCount], Defs = [CC] in def POPCNT : UnaryRRE<"popcnt", 0xB9E1, z_popcnt, GR64, GR64>; -// Use subregs to populate the "don't care" bits in a 32-bit to 64-bit anyext. -def : Pat<(i64 (anyext GR32:$src)), - (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32)>; - -// Extend GR32s and GR64s to GR128s. -let usesCustomInserter = 1 in { - def AEXT128_64 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>; - def ZEXT128_32 : Pseudo<(outs GR128:$dst), (ins GR32:$src), []>; - def ZEXT128_64 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>; -} - // Search a block of memory for a character. let mayLoad = 1, Defs = [CC] in - defm SRST : StringRRE<"srst", 0xb25e, z_search_string>; + defm SRST : StringRRE<"srst", 0xB25E, z_search_string>; +let mayLoad = 1, Defs = [CC], Uses = [R0L] in + def SRSTU : SideEffectBinaryMemMemRRE<"srstu", 0xB9BE, GR64, GR64>; -// Supervisor call. -let hasSideEffects = 1, isCall = 1, Defs = [CC] in - def SVC : SideEffectUnaryI<"svc", 0x0A, imm32zx8>; +// Compare until substring equal. +let mayLoad = 1, Defs = [CC], Uses = [R0L, R1L] in + def CUSE : SideEffectBinaryMemMemRRE<"cuse", 0xB257, GR128, GR128>; -// Store clock. -let hasSideEffects = 1, Defs = [CC] in { - def STCK : StoreInherentS<"stck", 0xB205, null_frag, 8>; - def STCKF : StoreInherentS<"stckf", 0xB27C, null_frag, 8>; - def STCKE : StoreInherentS<"stcke", 0xB278, null_frag, 16>; -} +// Compare and form codeword. 
+let mayLoad = 1, Defs = [CC, R1D, R2D, R3D], Uses = [R1D, R2D, R3D] in + def CFC : SideEffectAddressS<"cfc", 0xB21A, null_frag>; -// Store facility list. -let hasSideEffects = 1, Uses = [R0D], Defs = [R0D, CC] in - def STFLE : StoreInherentS<"stfle", 0xB2B0, null_frag, 0>; +// Update tree. +let mayLoad = 1, mayStore = 1, Defs = [CC, R0D, R1D, R2D, R3D, R5D], + Uses = [R0D, R1D, R2D, R3D, R4D, R5D] in + def UPT : SideEffectInherentE<"upt", 0x0102>; -// Extract CPU time. -let Defs = [R0D, R1D], hasSideEffects = 1, mayLoad = 1 in - def ECTG : SideEffectTernarySSF<"ectg", 0xC81, GR64>; +// Checksum. +let mayLoad = 1, Defs = [CC] in + def CKSM : SideEffectBinaryMemMemRRE<"cksm", 0xB241, GR64, GR128>; + +// Compression call. +let mayLoad = 1, mayStore = 1, Defs = [CC, R1D], Uses = [R0L, R1D] in + def CMPSC : SideEffectBinaryMemMemRRE<"cmpsc", 0xB263, GR128, GR128>; // Execute. let hasSideEffects = 1 in { @@ -1739,17 +2024,6 @@ let hasSideEffects = 1 in { def EXRL : SideEffectBinaryRILPC<"exrl", 0xC60, GR64>; } -// Program return. -let hasSideEffects = 1, Defs = [CC] in - def PR : SideEffectInherentE<"pr", 0x0101>; - -// Move with key. -let mayLoad = 1, mayStore = 1, Defs = [CC] in - def MVCK : MemoryBinarySSd<"mvck", 0xD9, GR64>; - -// Store real address. -def STRAG : StoreSSE<"strag", 0xE502>; - //===----------------------------------------------------------------------===// // .insn directive instructions //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrSystem.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrSystem.td new file mode 100644 index 0000000..0112ebf --- /dev/null +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrSystem.td @@ -0,0 +1,521 @@ +//==- SystemZInstrSystem.td - SystemZ system instructions -*- tblgen-*-----==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The instructions in this file implement SystemZ system-level instructions. +// Most of these instructions are privileged or semi-privileged. They are +// not used for code generation, but are provided for use with the assembler +// and disassembler only. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Program-Status Word Instructions. +//===----------------------------------------------------------------------===// + +// Extract PSW. +let hasSideEffects = 1, Uses = [CC] in + def EPSW : InherentDualRRE<"epsw", 0xB98D, GR32>; + +// Load PSW (extended). +let hasSideEffects = 1, Defs = [CC], mayLoad = 1 in { + def LPSW : SideEffectUnaryS<"lpsw", 0x8200, null_frag, 8>; + def LPSWE : SideEffectUnaryS<"lpswe", 0xB2B2, null_frag, 16>; +} + +// Insert PSW key. +let Uses = [R2L], Defs = [R2L] in + def IPK : SideEffectInherentS<"ipk", 0xB20B, null_frag>; + +// Set PSW key from address. +let hasSideEffects = 1 in + def SPKA : SideEffectAddressS<"spka", 0xB20A, null_frag>; + +// Set system mask. +let hasSideEffects = 1, mayLoad = 1 in + def SSM : SideEffectUnaryS<"ssm", 0x8000, null_frag, 1>; + +// Store then AND/OR system mask. +let hasSideEffects = 1 in { + def STNSM : StoreSI<"stnsm", 0xAC, null_frag, imm32zx8>; + def STOSM : StoreSI<"stosm", 0xAD, null_frag, imm32zx8>; +} + +// Insert address space control. 
+let hasSideEffects = 1 in + def IAC : InherentRRE<"iac", 0xB224, GR32, null_frag>; + +// Set address space control (fast). +let hasSideEffects = 1 in { + def SAC : SideEffectAddressS<"sac", 0xB219, null_frag>; + def SACF : SideEffectAddressS<"sacf", 0xB279, null_frag>; +} + +//===----------------------------------------------------------------------===// +// Control Register Instructions. +//===----------------------------------------------------------------------===// + +// Load control. +def LCTL : LoadMultipleRS<"lctl", 0xB7, CR64>; +def LCTLG : LoadMultipleRSY<"lctlg", 0xEB2F, CR64>; + +// Store control. +def STCTL : StoreMultipleRS<"stctl", 0xB6, CR64>; +def STCTG : StoreMultipleRSY<"stctg", 0xEB25, CR64>; + +// Extract primary ASN (and instance). +let hasSideEffects = 1 in { + def EPAR : InherentRRE<"epar", 0xB226, GR32, null_frag>; + def EPAIR : InherentRRE<"epair", 0xB99A, GR64, null_frag>; +} + +// Extract secondary ASN (and instance). +let hasSideEffects = 1 in { + def ESAR : InherentRRE<"esar", 0xB227, GR32, null_frag>; + def ESAIR : InherentRRE<"esair", 0xB99B, GR64, null_frag>; +} + +// Set secondary ASN (and instance). +let hasSideEffects = 1 in { + def SSAR : SideEffectUnaryRRE<"ssar", 0xB225, GR32, null_frag>; + def SSAIR : SideEffectUnaryRRE<"ssair", 0xB99F, GR64, null_frag>; +} + +// Extract and set extended authority. +let hasSideEffects = 1 in + def ESEA : UnaryTiedRRE<"esea", 0xB99D, GR32>; + +//===----------------------------------------------------------------------===// +// Prefix-Register Instructions. +//===----------------------------------------------------------------------===// + +// Set prefix. +let hasSideEffects = 1 in + def SPX : SideEffectUnaryS<"spx", 0xB210, null_frag, 4>; + +// Store prefix. +let hasSideEffects = 1 in + def STPX : StoreInherentS<"stpx", 0xB211, null_frag, 4>; + +//===----------------------------------------------------------------------===// +// Storage-Key and Real Memory Instructions. +//===----------------------------------------------------------------------===// + +// Insert storage key extended. +let hasSideEffects = 1 in + def ISKE : BinaryRRE<"iske", 0xB229, null_frag, GR32, GR64>; + +// Insert virtual storage key. +let hasSideEffects = 1 in + def IVSK : BinaryRRE<"ivsk", 0xB223, null_frag, GR32, GR64>; + +// Set storage key extended. +let hasSideEffects = 1, Defs = [CC] in + defm SSKE : SideEffectTernaryRRFcOpt<"sske", 0xB22B, GR32, GR64>; + +// Reset reference bit extended. +let hasSideEffects = 1, Defs = [CC] in + def RRBE : SideEffectBinaryRRE<"rrbe", 0xB22A, GR32, GR64>; + +// Reset reference bits multiple. +let Predicates = [FeatureResetReferenceBitsMultiple], hasSideEffects = 1 in + def RRBM : UnaryRRE<"rrbm", 0xB9AE, null_frag, GR64, GR64>; + +// Insert reference bits multiple. +let Predicates = [FeatureInsertReferenceBitsMultiple], hasSideEffects = 1 in + def IRBM : UnaryRRE<"irbm", 0xB9AC, null_frag, GR64, GR64>; + +// Perform frame management function. +let hasSideEffects = 1 in + def PFMF : SideEffectBinaryMemRRE<"pfmf", 0xB9AF, GR32, GR64>; + +// Test block. +let hasSideEffects = 1, mayStore = 1, Uses = [R0D], Defs = [R0D, CC] in + def TB : SideEffectBinaryRRE<"tb", 0xB22C, GR64, GR64>; + +// Page in / out. +let mayLoad = 1, mayStore = 1, Defs = [CC] in { + def PGIN : SideEffectBinaryRRE<"pgin", 0xB22E, GR64, GR64>; + def PGOUT : SideEffectBinaryRRE<"pgout", 0xB22F, GR64, GR64>; +} + +//===----------------------------------------------------------------------===// +// Dynamic-Address-Translation Instructions. 
+//===----------------------------------------------------------------------===// + +// Invalidate page table entry. +let hasSideEffects = 1 in + defm IPTE : SideEffectQuaternaryRRFaOptOpt<"ipte", 0xB221, GR64, GR32, GR32>; + +// Invalidate DAT table entry. +let hasSideEffects = 1 in + defm IDTE : SideEffectQuaternaryRRFbOpt<"idte", 0xB98E, GR64, GR64, GR64>; + +// Compare and replace DAT table entry. +let Predicates = [FeatureEnhancedDAT2], hasSideEffects = 1, Defs = [CC] in + defm CRDTE : SideEffectQuaternaryRRFbOpt<"crdte", 0xB98F, GR128, GR128, GR64>; + +// Purge TLB. +let hasSideEffects = 1 in + def PTLB : SideEffectInherentS<"ptlb", 0xB20D, null_frag>; + +// Compare and swap and purge. +let hasSideEffects = 1, Defs = [CC] in { + def CSP : CmpSwapRRE<"csp", 0xB250, GR128, GR64>; + def CSPG : CmpSwapRRE<"cspg", 0xB98A, GR128, GR64>; +} + +// Load page-table-entry address. +let hasSideEffects = 1, Defs = [CC] in + def LPTEA : TernaryRRFb<"lptea", 0xB9AA, GR64, GR64, GR64>; + +// Load real address. +let hasSideEffects = 1, Defs = [CC] in { + defm LRA : LoadAddressRXPair<"lra", 0xB1, 0xE313, null_frag>; + def LRAG : LoadAddressRXY<"lrag", 0xE303, null_frag, laaddr20pair>; +} + +// Store real address. +def STRAG : StoreSSE<"strag", 0xE502>; + +// Load using real address. +let mayLoad = 1 in { + def LURA : UnaryRRE<"lura", 0xB24B, null_frag, GR32, GR64>; + def LURAG : UnaryRRE<"lurag", 0xB905, null_frag, GR64, GR64>; +} + +// Store using real address. +let mayStore = 1 in { + def STURA : SideEffectBinaryRRE<"stura", 0xB246, GR32, GR64>; + def STURG : SideEffectBinaryRRE<"sturg", 0xB925, GR64, GR64>; +} + +// Test protection. +let hasSideEffects = 1, Defs = [CC] in + def TPROT : SideEffectBinarySSE<"tprot", 0xE501>; + +//===----------------------------------------------------------------------===// +// Memory-move Instructions. +//===----------------------------------------------------------------------===// + +// Move with key. +let mayLoad = 1, mayStore = 1, Defs = [CC] in + def MVCK : MemoryBinarySSd<"mvck", 0xD9, GR64>; + +// Move to primary / secondary. +let mayLoad = 1, mayStore = 1, Defs = [CC] in { + def MVCP : MemoryBinarySSd<"mvcp", 0xDA, GR64>; + def MVCS : MemoryBinarySSd<"mvcs", 0xDB, GR64>; +} + +// Move with source / destination key. +let mayLoad = 1, mayStore = 1, Uses = [R0L, R1L] in { + def MVCSK : SideEffectBinarySSE<"mvcsk", 0xE50E>; + def MVCDK : SideEffectBinarySSE<"mvcdk", 0xE50F>; +} + +// Move with optional specifications. +let mayLoad = 1, mayStore = 1, Uses = [R0L] in + def MVCOS : SideEffectTernarySSF<"mvcos", 0xC80, GR64>; + +// Move page. +let mayLoad = 1, mayStore = 1, Uses = [R0L], Defs = [CC] in + def MVPG : SideEffectBinaryRRE<"mvpg", 0xB254, GR64, GR64>; + +//===----------------------------------------------------------------------===// +// Address-Space Instructions. +//===----------------------------------------------------------------------===// + +// Load address space parameters. +let hasSideEffects = 1, Defs = [CC] in + def LASP : SideEffectBinarySSE<"lasp", 0xE500>; + +// Purge ALB. +let hasSideEffects = 1 in + def PALB : SideEffectInherentRRE<"palb", 0xB248>; + +// Program call. +let hasSideEffects = 1 in + def PC : SideEffectAddressS<"pc", 0xB218, null_frag>; + +// Program return. +let hasSideEffects = 1, Defs = [CC] in + def PR : SideEffectInherentE<"pr", 0x0101>; + +// Program transfer (with instance). 
+let hasSideEffects = 1 in { + def PT : SideEffectBinaryRRE<"pt", 0xB228, GR32, GR64>; + def PTI : SideEffectBinaryRRE<"pti", 0xB99E, GR64, GR64>; +} + +// Resume program. +let hasSideEffects = 1, Defs = [CC] in + def RP : SideEffectAddressS<"rp", 0xB277, null_frag>; + +// Branch in subspace group. +let hasSideEffects = 1 in + def BSG : UnaryRRE<"bsg", 0xB258, null_frag, GR64, GR64>; + +// Branch and set authority. +let hasSideEffects = 1 in + def BSA : UnaryRRE<"bsa", 0xB25A, null_frag, GR64, GR64>; + +// Test access. +let Defs = [CC] in + def TAR : SideEffectBinaryRRE<"tar", 0xB24C, AR32, GR32>; + +//===----------------------------------------------------------------------===// +// Linkage-Stack Instructions. +//===----------------------------------------------------------------------===// + +// Branch and stack. +let hasSideEffects = 1 in + def BAKR : SideEffectBinaryRRE<"bakr", 0xB240, GR64, GR64>; + +// Extract stacked registers. +let hasSideEffects = 1 in { + def EREG : SideEffectBinaryRRE<"ereg", 0xB249, GR32, GR32>; + def EREGG : SideEffectBinaryRRE<"eregg", 0xB90E, GR64, GR64>; +} + +// Extract stacked state. +let hasSideEffects = 1, Defs = [CC] in + def ESTA : UnaryRRE<"esta", 0xB24A, null_frag, GR128, GR32>; + +// Modify stacked state. +let hasSideEffects = 1 in + def MSTA : SideEffectUnaryRRE<"msta", 0xB247, GR128, null_frag>; + +//===----------------------------------------------------------------------===// +// Time-Related Instructions. +//===----------------------------------------------------------------------===// + +// Perform timing facility function. +let hasSideEffects = 1, mayLoad = 1, Uses = [R0L, R1D], Defs = [CC] in + def PTFF : SideEffectInherentE<"ptff", 0x0104>; + +// Set clock. +let hasSideEffects = 1, Defs = [CC] in + def SCK : SideEffectUnaryS<"sck", 0xB204, null_frag, 8>; + +// Set clock programmable field. +let hasSideEffects = 1, Uses = [R0L] in + def SCKPF : SideEffectInherentE<"sckpf", 0x0107>; + +// Set clock comparator. +let hasSideEffects = 1 in + def SCKC : SideEffectUnaryS<"sckc", 0xB206, null_frag, 8>; + +// Set CPU timer. +let hasSideEffects = 1 in + def SPT : SideEffectUnaryS<"spt", 0xB208, null_frag, 8>; + +// Store clock (fast / extended). +let hasSideEffects = 1, Defs = [CC] in { + def STCK : StoreInherentS<"stck", 0xB205, null_frag, 8>; + def STCKF : StoreInherentS<"stckf", 0xB27C, null_frag, 8>; + def STCKE : StoreInherentS<"stcke", 0xB278, null_frag, 16>; +} + +// Store clock comparator. +let hasSideEffects = 1 in + def STCKC : StoreInherentS<"stckc", 0xB207, null_frag, 8>; + +// Store CPU timer. +let hasSideEffects = 1 in + def STPT : StoreInherentS<"stpt", 0xB209, null_frag, 8>; + +//===----------------------------------------------------------------------===// +// CPU-Related Instructions. +//===----------------------------------------------------------------------===// + +// Store CPU address. +let hasSideEffects = 1 in + def STAP : StoreInherentS<"stap", 0xB212, null_frag, 2>; + +// Store CPU ID. +let hasSideEffects = 1 in + def STIDP : StoreInherentS<"stidp", 0xB202, null_frag, 8>; + +// Store system information. +let hasSideEffects = 1, Uses = [R0L, R1L], Defs = [R0L, CC] in + def STSI : StoreInherentS<"stsi", 0xB27D, null_frag, 0>; + +// Store facility list. +let hasSideEffects = 1 in + def STFL : StoreInherentS<"stfl", 0xB2B1, null_frag, 4>; + +// Store facility list extended. +let hasSideEffects = 1, Uses = [R0D], Defs = [R0D, CC] in + def STFLE : StoreInherentS<"stfle", 0xB2B0, null_frag, 0>; + +// Extract CPU attribute. 
+let hasSideEffects = 1 in + def ECAG : BinaryRSY<"ecag", 0xEB4C, null_frag, GR64>; + +// Extract CPU time. +let hasSideEffects = 1, mayLoad = 1, Defs = [R0D, R1D] in + def ECTG : SideEffectTernarySSF<"ectg", 0xC81, GR64>; + +// Perform topology function. +let hasSideEffects = 1 in + def PTF : UnaryTiedRRE<"ptf", 0xB9A2, GR64>; + +// Perform cryptographic key management operation. +let Predicates = [FeatureMessageSecurityAssist3], + hasSideEffects = 1, Uses = [R0L, R1D] in + def PCKMO : SideEffectInherentRRE<"pckmo", 0xB928>; + +//===----------------------------------------------------------------------===// +// Miscellaneous Instructions. +//===----------------------------------------------------------------------===// + +// Supervisor call. +let hasSideEffects = 1, isCall = 1, Defs = [CC] in + def SVC : SideEffectUnaryI<"svc", 0x0A, imm32zx8>; + +// Monitor call. +let hasSideEffects = 1, isCall = 1 in + def MC : SideEffectBinarySI<"mc", 0xAF, imm32zx8>; + +// Diagnose. +let hasSideEffects = 1, isCall = 1 in + def DIAG : SideEffectTernaryRS<"diag", 0x83, GR32, GR32>; + +// Trace. +let hasSideEffects = 1, mayLoad = 1 in { + def TRACE : SideEffectTernaryRS<"trace", 0x99, GR32, GR32>; + def TRACG : SideEffectTernaryRSY<"tracg", 0xEB0F, GR64, GR64>; +} + +// Trap. +let hasSideEffects = 1 in { + def TRAP2 : SideEffectInherentE<"trap2", 0x01FF>; + def TRAP4 : SideEffectAddressS<"trap4", 0xB2FF, null_frag>; +} + +// Signal processor. +let hasSideEffects = 1, Defs = [CC] in + def SIGP : SideEffectTernaryRS<"sigp", 0xAE, GR64, GR64>; + +// Signal adapter. +let hasSideEffects = 1, Uses = [R0D, R1D, R2D, R3D], Defs = [CC] in + def SIGA : SideEffectAddressS<"siga", 0xB274, null_frag>; + +// Start interpretive execution. +let hasSideEffects = 1, Defs = [CC] in + def SIE : SideEffectUnaryS<"sie", 0xB214, null_frag, 0>; + +//===----------------------------------------------------------------------===// +// CPU-Measurement Facility Instructions (SA23-2260). +//===----------------------------------------------------------------------===// + +// Load program parameter +let hasSideEffects = 1 in + def LPP : SideEffectUnaryS<"lpp", 0xB280, null_frag, 8>; + +// Extract coprocessor-group address. +let hasSideEffects = 1, Defs = [CC] in + def ECPGA : UnaryRRE<"ecpga", 0xB2ED, null_frag, GR32, GR64>; + +// Extract CPU counter. +let hasSideEffects = 1, Defs = [CC] in + def ECCTR : UnaryRRE<"ecctr", 0xB2E4, null_frag, GR64, GR64>; + +// Extract peripheral counter. +let hasSideEffects = 1, Defs = [CC] in + def EPCTR : UnaryRRE<"epctr", 0xB2E5, null_frag, GR64, GR64>; + +// Load CPU-counter-set controls. +let hasSideEffects = 1, Defs = [CC] in + def LCCTL : SideEffectUnaryS<"lcctl", 0xB284, null_frag, 8>; + +// Load peripheral-counter-set controls. +let hasSideEffects = 1, Defs = [CC] in + def LPCTL : SideEffectUnaryS<"lpctl", 0xB285, null_frag, 8>; + +// Load sampling controls. +let hasSideEffects = 1, Defs = [CC] in + def LSCTL : SideEffectUnaryS<"lsctl", 0xB287, null_frag, 0>; + +// Query sampling information. +let hasSideEffects = 1 in + def QSI : StoreInherentS<"qsi", 0xB286, null_frag, 0>; + +// Query counter information. +let hasSideEffects = 1 in + def QCTRI : StoreInherentS<"qctri", 0xB28E, null_frag, 0>; + +// Set CPU counter. +let hasSideEffects = 1, Defs = [CC] in + def SCCTR : SideEffectBinaryRRE<"scctr", 0xB2E0, GR64, GR64>; + +// Set peripheral counter. 
+let hasSideEffects = 1, Defs = [CC] in + def SPCTR : SideEffectBinaryRRE<"spctr", 0xB2E1, GR64, GR64>; + +//===----------------------------------------------------------------------===// +// I/O Instructions (Principles of Operation, Chapter 14). +//===----------------------------------------------------------------------===// + +// Clear subchannel. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def CSCH : SideEffectInherentS<"csch", 0xB230, null_frag>; + +// Halt subchannel. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def HSCH : SideEffectInherentS<"hsch", 0xB231, null_frag>; + +// Modify subchannel. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def MSCH : SideEffectUnaryS<"msch", 0xB232, null_frag, 0>; + +// Resume subchannel. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def RSCH : SideEffectInherentS<"rsch", 0xB238, null_frag>; + +// Start subchannel. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def SSCH : SideEffectUnaryS<"ssch", 0xB233, null_frag, 0>; + +// Store subchannel. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def STSCH : StoreInherentS<"stsch", 0xB234, null_frag, 0>; + +// Test subchannel. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def TSCH : StoreInherentS<"tsch", 0xB235, null_frag, 0>; + +// Cancel subchannel. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def XSCH : SideEffectInherentS<"xsch", 0xB276, null_frag>; + +// Reset channel path. +let hasSideEffects = 1, Uses = [R1L], Defs = [CC] in + def RCHP : SideEffectInherentS<"rchp", 0xB23B, null_frag>; + +// Set channel monitor. +let hasSideEffects = 1, mayLoad = 1, Uses = [R1L, R2D] in + def SCHM : SideEffectInherentS<"schm", 0xB23C, null_frag>; + +// Store channel path status. +let hasSideEffects = 1 in + def STCPS : StoreInherentS<"stcps", 0xB23A, null_frag, 0>; + +// Store channel report word. +let hasSideEffects = 1, Defs = [CC] in + def STCRW : StoreInherentS<"stcrw", 0xB239, null_frag, 0>; + +// Test pending interruption. +let hasSideEffects = 1, Defs = [CC] in + def TPI : StoreInherentS<"tpi", 0xB236, null_frag, 0>; + +// Set address limit. +let hasSideEffects = 1, Uses = [R1L] in + def SAL : SideEffectInherentS<"sal", 0xB237, null_frag>; + diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrVector.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrVector.td index 738ea7a..c9a02d9 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrVector.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrVector.td @@ -14,7 +14,7 @@ let Predicates = [FeatureVector] in { // Register move. def VLR : UnaryVRRa<"vlr", 0xE756, null_frag, v128any, v128any>; - def VLR32 : UnaryAliasVRR<null_frag, v32eb, v32eb>; + def VLR32 : UnaryAliasVRR<null_frag, v32sb, v32sb>; def VLR64 : UnaryAliasVRR<null_frag, v64db, v64db>; // Load GR from VR element. @@ -56,17 +56,28 @@ def : VectorExtractSubreg<v4i32, VLGVF>; //===----------------------------------------------------------------------===// let Predicates = [FeatureVector] in { - // Generate byte mask. - def VZERO : InherentVRIa<"vzero", 0xE744, 0>; - def VONE : InherentVRIa<"vone", 0xE744, 0xffff>; - def VGBM : UnaryVRIa<"vgbm", 0xE744, z_byte_mask, v128b, imm32zx16>; - - // Generate mask. 
- def VGM : BinaryVRIbGeneric<"vgm", 0xE746>; - def VGMB : BinaryVRIb<"vgmb", 0xE746, z_rotate_mask, v128b, 0>; - def VGMH : BinaryVRIb<"vgmh", 0xE746, z_rotate_mask, v128h, 1>; - def VGMF : BinaryVRIb<"vgmf", 0xE746, z_rotate_mask, v128f, 2>; - def VGMG : BinaryVRIb<"vgmg", 0xE746, z_rotate_mask, v128g, 3>; + let hasSideEffects = 0, isAsCheapAsAMove = 1, isMoveImm = 1, + isReMaterializable = 1 in { + + // Generate byte mask. + def VZERO : InherentVRIa<"vzero", 0xE744, 0>; + def VONE : InherentVRIa<"vone", 0xE744, 0xffff>; + def VGBM : UnaryVRIa<"vgbm", 0xE744, z_byte_mask, v128b, imm32zx16>; + + // Generate mask. + def VGM : BinaryVRIbGeneric<"vgm", 0xE746>; + def VGMB : BinaryVRIb<"vgmb", 0xE746, z_rotate_mask, v128b, 0>; + def VGMH : BinaryVRIb<"vgmh", 0xE746, z_rotate_mask, v128h, 1>; + def VGMF : BinaryVRIb<"vgmf", 0xE746, z_rotate_mask, v128f, 2>; + def VGMG : BinaryVRIb<"vgmg", 0xE746, z_rotate_mask, v128g, 3>; + + // Replicate immediate. + def VREPI : UnaryVRIaGeneric<"vrepi", 0xE745, imm32sx16>; + def VREPIB : UnaryVRIa<"vrepib", 0xE745, z_replicate, v128b, imm32sx16, 0>; + def VREPIH : UnaryVRIa<"vrepih", 0xE745, z_replicate, v128h, imm32sx16, 1>; + def VREPIF : UnaryVRIa<"vrepif", 0xE745, z_replicate, v128f, imm32sx16, 2>; + def VREPIG : UnaryVRIa<"vrepig", 0xE745, z_replicate, v128g, imm32sx16, 3>; + } // Load element immediate. // @@ -86,13 +97,6 @@ let Predicates = [FeatureVector] in { def VLEIG : TernaryVRIa<"vleig", 0xE742, z_vector_insert, v128g, v128g, imm64sx16, imm32zx1>; } - - // Replicate immediate. - def VREPI : UnaryVRIaGeneric<"vrepi", 0xE745, imm32sx16>; - def VREPIB : UnaryVRIa<"vrepib", 0xE745, z_replicate, v128b, imm32sx16, 0>; - def VREPIH : UnaryVRIa<"vrepih", 0xE745, z_replicate, v128h, imm32sx16, 1>; - def VREPIF : UnaryVRIa<"vrepif", 0xE745, z_replicate, v128f, imm32sx16, 2>; - def VREPIG : UnaryVRIa<"vrepig", 0xE745, z_replicate, v128g, imm32sx16, 3>; } //===----------------------------------------------------------------------===// @@ -137,7 +141,7 @@ let Predicates = [FeatureVector] in { // LEY and LDY offer full 20-bit displacement fields. It's often better // to use those instructions rather than force a 20-bit displacement // into a GPR temporary. - def VL32 : UnaryAliasVRX<load, v32eb, bdxaddr12pair>; + def VL32 : UnaryAliasVRX<load, v32sb, bdxaddr12pair>; def VL64 : UnaryAliasVRX<load, v64db, bdxaddr12pair>; // Load logical element and zero. @@ -150,6 +154,11 @@ let Predicates = [FeatureVector] in { (VLLEZF bdxaddr12only:$addr)>; def : Pat<(v2f64 (z_vllezf64 bdxaddr12only:$addr)), (VLLEZG bdxaddr12only:$addr)>; + let Predicates = [FeatureVectorEnhancements1] in { + def VLLEZLF : UnaryVRX<"vllezlf", 0xE704, z_vllezli32, v128f, 4, 6>; + def : Pat<(v4f32 (z_vllezlf32 bdxaddr12only:$addr)), + (VLLEZLF bdxaddr12only:$addr)>; + } // Load element. def VLEB : TernaryVRX<"vleb", 0xE700, z_vlei8, v128b, v128b, 1, imm32zx4>; @@ -166,6 +175,13 @@ let Predicates = [FeatureVector] in { def VGEG : TernaryVRV<"vgeg", 0xE712, 8, imm32zx1>; } +let Predicates = [FeatureVectorPackedDecimal] in { + // Load rightmost with length. The number of loaded bytes is only known + // at run time. + def VLRL : BinaryVSI<"vlrl", 0xE635, int_s390_vlrl, 0>; + def VLRLR : BinaryVRSd<"vlrlr", 0xE637, int_s390_vlrl, 0>; +} + // Use replicating loads if we're inserting a single element into an // undefined vector. This avoids a false dependency on the previous // register contents. 
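// Illustration of the note above (a sketch, not part of the patch): when a
// loaded element is inserted into an otherwise undefined vector, e.g.
//
//   %v = insertelement <4 x i32> undef, i32 %elt, i32 0   ; %elt loaded from memory
//
// a replicating load such as VLREPF can be used instead of a separate load
// plus VLEF.  The replicate rewrites every lane of the destination, so the
// result does not depend on the previous contents of the vector register
// and no false dependency is introduced.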
@@ -215,7 +231,7 @@ let Predicates = [FeatureVector] in { // STEY and STDY offer full 20-bit displacement fields. It's often better // to use those instructions rather than force a 20-bit displacement // into a GPR temporary. - def VST32 : StoreAliasVRX<store, v32eb, bdxaddr12pair>; + def VST32 : StoreAliasVRX<store, v32sb, bdxaddr12pair>; def VST64 : StoreAliasVRX<store, v64db, bdxaddr12pair>; // Scatter element. @@ -223,6 +239,13 @@ let Predicates = [FeatureVector] in { def VSCEG : StoreBinaryVRV<"vsceg", 0xE71A, 8, imm32zx1>; } +let Predicates = [FeatureVectorPackedDecimal] in { + // Store rightmost with length. The number of stored bytes is only known + // at run time. + def VSTRL : StoreLengthVSI<"vstrl", 0xE63D, int_s390_vstrl, 0>; + def VSTRLR : StoreLengthVRSd<"vstrlr", 0xE63F, int_s390_vstrl, 0>; +} + //===----------------------------------------------------------------------===// // Selects and permutes //===----------------------------------------------------------------------===// @@ -252,6 +275,10 @@ let Predicates = [FeatureVector] in { // Permute doubleword immediate. def VPDI : TernaryVRRc<"vpdi", 0xE784, z_permute_dwords, v128g, v128g>; + // Bit Permute. + let Predicates = [FeatureVectorEnhancements1] in + def VBPERM : BinaryVRRc<"vbperm", 0xE785, int_s390_vbperm, v128g, v128b>; + // Replicate. def VREP: BinaryVRIcGeneric<"vrep", 0xE74D>; def VREPB : BinaryVRIc<"vrepb", 0xE74D, z_splat, v128b, v128b, 0>; @@ -420,6 +447,10 @@ let Predicates = [FeatureVector] in { def VCTZF : UnaryVRRa<"vctzf", 0xE752, cttz, v128f, v128f, 2>; def VCTZG : UnaryVRRa<"vctzg", 0xE752, cttz, v128g, v128g, 3>; + // Not exclusive or. + let Predicates = [FeatureVectorEnhancements1] in + def VNX : BinaryVRRc<"vnx", 0xE76C, null_frag, v128any, v128any>; + // Exclusive or. def VX : BinaryVRRc<"vx", 0xE76D, null_frag, v128any, v128any>; @@ -563,6 +594,17 @@ let Predicates = [FeatureVector] in { def VMLOH : BinaryVRRc<"vmloh", 0xE7A5, int_s390_vmloh, v128f, v128h, 1>; def VMLOF : BinaryVRRc<"vmlof", 0xE7A5, int_s390_vmlof, v128g, v128f, 2>; + // Multiply sum logical. + let Predicates = [FeatureVectorEnhancements1] in { + def VMSL : QuaternaryVRRdGeneric<"vmsl", 0xE7B8>; + def VMSLG : QuaternaryVRRd<"vmslg", 0xE7B8, int_s390_vmslg, + v128q, v128g, v128g, v128q, 3>; + } + + // Nand. + let Predicates = [FeatureVectorEnhancements1] in + def VNN : BinaryVRRc<"vnn", 0xE76E, null_frag, v128any, v128any>; + // Nor. def VNO : BinaryVRRc<"vno", 0xE76B, null_frag, v128any, v128any>; def : InstAlias<"vnot\t$V1, $V2", (VNO VR128:$V1, VR128:$V2, VR128:$V2), 0>; @@ -570,9 +612,19 @@ let Predicates = [FeatureVector] in { // Or. def VO : BinaryVRRc<"vo", 0xE76A, null_frag, v128any, v128any>; + // Or with complement. + let Predicates = [FeatureVectorEnhancements1] in + def VOC : BinaryVRRc<"voc", 0xE76F, null_frag, v128any, v128any>; + // Population count. def VPOPCT : UnaryVRRaGeneric<"vpopct", 0xE750>; def : Pat<(v16i8 (z_popcnt VR128:$x)), (VPOPCT VR128:$x, 0)>; + let Predicates = [FeatureVectorEnhancements1] in { + def VPOPCTB : UnaryVRRa<"vpopctb", 0xE750, ctpop, v128b, v128b, 0>; + def VPOPCTH : UnaryVRRa<"vpopcth", 0xE750, ctpop, v128h, v128h, 1>; + def VPOPCTF : UnaryVRRa<"vpopctf", 0xE750, ctpop, v128f, v128f, 2>; + def VPOPCTG : UnaryVRRa<"vpopctg", 0xE750, ctpop, v128g, v128g, 3>; + } // Element rotate left logical (with vector shift amount). 
def VERLLV : BinaryVRRcGeneric<"verllv", 0xE773>; @@ -720,6 +772,14 @@ multiclass BitwiseVectorOps<ValueType type> { (VNO VR128:$x, VR128:$y)>; def : Pat<(type (z_vnot VR128:$x)), (VNO VR128:$x, VR128:$x)>; } + let Predicates = [FeatureVectorEnhancements1] in { + def : Pat<(type (z_vnot (xor VR128:$x, VR128:$y))), + (VNX VR128:$x, VR128:$y)>; + def : Pat<(type (z_vnot (and VR128:$x, VR128:$y))), + (VNN VR128:$x, VR128:$y)>; + def : Pat<(type (or VR128:$x, (z_vnot VR128:$y))), + (VOC VR128:$x, VR128:$y)>; + } } defm : BitwiseVectorOps<v16i8>; @@ -875,6 +935,11 @@ let Predicates = [FeatureVector] in { def VFA : BinaryVRRcFloatGeneric<"vfa", 0xE7E3>; def VFADB : BinaryVRRc<"vfadb", 0xE7E3, fadd, v128db, v128db, 3, 0>; def WFADB : BinaryVRRc<"wfadb", 0xE7E3, fadd, v64db, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFASB : BinaryVRRc<"vfasb", 0xE7E3, fadd, v128sb, v128sb, 2, 0>; + def WFASB : BinaryVRRc<"wfasb", 0xE7E3, fadd, v32sb, v32sb, 2, 8>; + def WFAXB : BinaryVRRc<"wfaxb", 0xE7E3, fadd, v128xb, v128xb, 4, 8>; + } // Convert from fixed 64-bit. def VCDG : TernaryVRRaFloatGeneric<"vcdg", 0xE7C3>; @@ -906,6 +971,11 @@ let Predicates = [FeatureVector] in { def VFD : BinaryVRRcFloatGeneric<"vfd", 0xE7E5>; def VFDDB : BinaryVRRc<"vfddb", 0xE7E5, fdiv, v128db, v128db, 3, 0>; def WFDDB : BinaryVRRc<"wfddb", 0xE7E5, fdiv, v64db, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFDSB : BinaryVRRc<"vfdsb", 0xE7E5, fdiv, v128sb, v128sb, 2, 0>; + def WFDSB : BinaryVRRc<"wfdsb", 0xE7E5, fdiv, v32sb, v32sb, 2, 8>; + def WFDXB : BinaryVRRc<"wfdxb", 0xE7E5, fdiv, v128xb, v128xb, 4, 8>; + } // Load FP integer. def VFI : TernaryVRRaFloatGeneric<"vfi", 0xE7C7>; @@ -913,66 +983,213 @@ let Predicates = [FeatureVector] in { def WFIDB : TernaryVRRa<"wfidb", 0xE7C7, null_frag, v64db, v64db, 3, 8>; defm : VectorRounding<VFIDB, v128db>; defm : VectorRounding<WFIDB, v64db>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFISB : TernaryVRRa<"vfisb", 0xE7C7, int_s390_vfisb, v128sb, v128sb, 2, 0>; + def WFISB : TernaryVRRa<"wfisb", 0xE7C7, null_frag, v32sb, v32sb, 2, 8>; + def WFIXB : TernaryVRRa<"wfixb", 0xE7C7, null_frag, v128xb, v128xb, 4, 8>; + defm : VectorRounding<VFISB, v128sb>; + defm : VectorRounding<WFISB, v32sb>; + defm : VectorRounding<WFIXB, v128xb>; + } // Load lengthened. def VLDE : UnaryVRRaFloatGeneric<"vlde", 0xE7C4>; - def VLDEB : UnaryVRRa<"vldeb", 0xE7C4, z_vextend, v128db, v128eb, 2, 0>; - def WLDEB : UnaryVRRa<"wldeb", 0xE7C4, fpextend, v64db, v32eb, 2, 8>; + def VLDEB : UnaryVRRa<"vldeb", 0xE7C4, z_vextend, v128db, v128sb, 2, 0>; + def WLDEB : UnaryVRRa<"wldeb", 0xE7C4, fpextend, v64db, v32sb, 2, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + let isAsmParserOnly = 1 in { + def VFLL : UnaryVRRaFloatGeneric<"vfll", 0xE7C4>; + def VFLLS : UnaryVRRa<"vflls", 0xE7C4, null_frag, v128db, v128sb, 2, 0>; + def WFLLS : UnaryVRRa<"wflls", 0xE7C4, null_frag, v64db, v32sb, 2, 8>; + } + def WFLLD : UnaryVRRa<"wflld", 0xE7C4, fpextend, v128xb, v64db, 3, 8>; + def : Pat<(f128 (fpextend (f32 VR32:$src))), + (WFLLD (WLDEB VR32:$src))>; + } - // Load rounded, + // Load rounded. 
def VLED : TernaryVRRaFloatGeneric<"vled", 0xE7C5>; - def VLEDB : TernaryVRRa<"vledb", 0xE7C5, null_frag, v128eb, v128db, 3, 0>; - def WLEDB : TernaryVRRa<"wledb", 0xE7C5, null_frag, v32eb, v64db, 3, 8>; + def VLEDB : TernaryVRRa<"vledb", 0xE7C5, null_frag, v128sb, v128db, 3, 0>; + def WLEDB : TernaryVRRa<"wledb", 0xE7C5, null_frag, v32sb, v64db, 3, 8>; def : Pat<(v4f32 (z_vround (v2f64 VR128:$src))), (VLEDB VR128:$src, 0, 0)>; - def : FPConversion<WLEDB, fpround, v32eb, v64db, 0, 0>; + def : FPConversion<WLEDB, fpround, v32sb, v64db, 0, 0>; + let Predicates = [FeatureVectorEnhancements1] in { + let isAsmParserOnly = 1 in { + def VFLR : TernaryVRRaFloatGeneric<"vflr", 0xE7C5>; + def VFLRD : TernaryVRRa<"vflrd", 0xE7C5, null_frag, v128sb, v128db, 3, 0>; + def WFLRD : TernaryVRRa<"wflrd", 0xE7C5, null_frag, v32sb, v64db, 3, 8>; + } + def WFLRX : TernaryVRRa<"wflrx", 0xE7C5, null_frag, v64db, v128xb, 4, 8>; + def : FPConversion<WFLRX, fpround, v64db, v128xb, 0, 0>; + def : Pat<(f32 (fpround (f128 VR128:$src))), + (WLEDB (WFLRX VR128:$src, 0, 3), 0, 0)>; + } + + // Maximum. + multiclass VectorMax<Instruction insn, TypedReg tr> { + def : FPMinMax<insn, fmaxnum, tr, 4>; + def : FPMinMax<insn, fmaxnan, tr, 1>; + } + let Predicates = [FeatureVectorEnhancements1] in { + def VFMAX : TernaryVRRcFloatGeneric<"vfmax", 0xE7EF>; + def VFMAXDB : TernaryVRRcFloat<"vfmaxdb", 0xE7EF, int_s390_vfmaxdb, + v128db, v128db, 3, 0>; + def WFMAXDB : TernaryVRRcFloat<"wfmaxdb", 0xE7EF, null_frag, + v64db, v64db, 3, 8>; + def VFMAXSB : TernaryVRRcFloat<"vfmaxsb", 0xE7EF, int_s390_vfmaxsb, + v128sb, v128sb, 2, 0>; + def WFMAXSB : TernaryVRRcFloat<"wfmaxsb", 0xE7EF, null_frag, + v32sb, v32sb, 2, 8>; + def WFMAXXB : TernaryVRRcFloat<"wfmaxxb", 0xE7EF, null_frag, + v128xb, v128xb, 4, 8>; + defm : VectorMax<VFMAXDB, v128db>; + defm : VectorMax<WFMAXDB, v64db>; + defm : VectorMax<VFMAXSB, v128sb>; + defm : VectorMax<WFMAXSB, v32sb>; + defm : VectorMax<WFMAXXB, v128xb>; + } + + // Minimum. + multiclass VectorMin<Instruction insn, TypedReg tr> { + def : FPMinMax<insn, fminnum, tr, 4>; + def : FPMinMax<insn, fminnan, tr, 1>; + } + let Predicates = [FeatureVectorEnhancements1] in { + def VFMIN : TernaryVRRcFloatGeneric<"vfmin", 0xE7EE>; + def VFMINDB : TernaryVRRcFloat<"vfmindb", 0xE7EE, int_s390_vfmindb, + v128db, v128db, 3, 0>; + def WFMINDB : TernaryVRRcFloat<"wfmindb", 0xE7EE, null_frag, + v64db, v64db, 3, 8>; + def VFMINSB : TernaryVRRcFloat<"vfminsb", 0xE7EE, int_s390_vfminsb, + v128sb, v128sb, 2, 0>; + def WFMINSB : TernaryVRRcFloat<"wfminsb", 0xE7EE, null_frag, + v32sb, v32sb, 2, 8>; + def WFMINXB : TernaryVRRcFloat<"wfminxb", 0xE7EE, null_frag, + v128xb, v128xb, 4, 8>; + defm : VectorMin<VFMINDB, v128db>; + defm : VectorMin<WFMINDB, v64db>; + defm : VectorMin<VFMINSB, v128sb>; + defm : VectorMin<WFMINSB, v32sb>; + defm : VectorMin<WFMINXB, v128xb>; + } // Multiply. def VFM : BinaryVRRcFloatGeneric<"vfm", 0xE7E7>; def VFMDB : BinaryVRRc<"vfmdb", 0xE7E7, fmul, v128db, v128db, 3, 0>; def WFMDB : BinaryVRRc<"wfmdb", 0xE7E7, fmul, v64db, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFMSB : BinaryVRRc<"vfmsb", 0xE7E7, fmul, v128sb, v128sb, 2, 0>; + def WFMSB : BinaryVRRc<"wfmsb", 0xE7E7, fmul, v32sb, v32sb, 2, 8>; + def WFMXB : BinaryVRRc<"wfmxb", 0xE7E7, fmul, v128xb, v128xb, 4, 8>; + } // Multiply and add. 
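// These match the generic fma node, i.e. a fused (a * b) + c with a single
// rounding, as produced for example by __builtin_fma or when separate
// multiply and add operations are contracted.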
def VFMA : TernaryVRReFloatGeneric<"vfma", 0xE78F>; def VFMADB : TernaryVRRe<"vfmadb", 0xE78F, fma, v128db, v128db, 0, 3>; def WFMADB : TernaryVRRe<"wfmadb", 0xE78F, fma, v64db, v64db, 8, 3>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFMASB : TernaryVRRe<"vfmasb", 0xE78F, fma, v128sb, v128sb, 0, 2>; + def WFMASB : TernaryVRRe<"wfmasb", 0xE78F, fma, v32sb, v32sb, 8, 2>; + def WFMAXB : TernaryVRRe<"wfmaxb", 0xE78F, fma, v128xb, v128xb, 8, 4>; + } // Multiply and subtract. def VFMS : TernaryVRReFloatGeneric<"vfms", 0xE78E>; def VFMSDB : TernaryVRRe<"vfmsdb", 0xE78E, fms, v128db, v128db, 0, 3>; def WFMSDB : TernaryVRRe<"wfmsdb", 0xE78E, fms, v64db, v64db, 8, 3>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFMSSB : TernaryVRRe<"vfmssb", 0xE78E, fms, v128sb, v128sb, 0, 2>; + def WFMSSB : TernaryVRRe<"wfmssb", 0xE78E, fms, v32sb, v32sb, 8, 2>; + def WFMSXB : TernaryVRRe<"wfmsxb", 0xE78E, fms, v128xb, v128xb, 8, 4>; + } + + // Negative multiply and add. + let Predicates = [FeatureVectorEnhancements1] in { + def VFNMA : TernaryVRReFloatGeneric<"vfnma", 0xE79F>; + def VFNMADB : TernaryVRRe<"vfnmadb", 0xE79F, fnma, v128db, v128db, 0, 3>; + def WFNMADB : TernaryVRRe<"wfnmadb", 0xE79F, fnma, v64db, v64db, 8, 3>; + def VFNMASB : TernaryVRRe<"vfnmasb", 0xE79F, fnma, v128sb, v128sb, 0, 2>; + def WFNMASB : TernaryVRRe<"wfnmasb", 0xE79F, fnma, v32sb, v32sb, 8, 2>; + def WFNMAXB : TernaryVRRe<"wfnmaxb", 0xE79F, fnma, v128xb, v128xb, 8, 4>; + } + + // Negative multiply and subtract. + let Predicates = [FeatureVectorEnhancements1] in { + def VFNMS : TernaryVRReFloatGeneric<"vfnms", 0xE79E>; + def VFNMSDB : TernaryVRRe<"vfnmsdb", 0xE79E, fnms, v128db, v128db, 0, 3>; + def WFNMSDB : TernaryVRRe<"wfnmsdb", 0xE79E, fnms, v64db, v64db, 8, 3>; + def VFNMSSB : TernaryVRRe<"vfnmssb", 0xE79E, fnms, v128sb, v128sb, 0, 2>; + def WFNMSSB : TernaryVRRe<"wfnmssb", 0xE79E, fnms, v32sb, v32sb, 8, 2>; + def WFNMSXB : TernaryVRRe<"wfnmsxb", 0xE79E, fnms, v128xb, v128xb, 8, 4>; + } // Perform sign operation. def VFPSO : BinaryVRRaFloatGeneric<"vfpso", 0xE7CC>; def VFPSODB : BinaryVRRa<"vfpsodb", 0xE7CC, null_frag, v128db, v128db, 3, 0>; def WFPSODB : BinaryVRRa<"wfpsodb", 0xE7CC, null_frag, v64db, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFPSOSB : BinaryVRRa<"vfpsosb", 0xE7CC, null_frag, v128sb, v128sb, 2, 0>; + def WFPSOSB : BinaryVRRa<"wfpsosb", 0xE7CC, null_frag, v32sb, v32sb, 2, 8>; + def WFPSOXB : BinaryVRRa<"wfpsoxb", 0xE7CC, null_frag, v128xb, v128xb, 4, 8>; + } // Load complement. def VFLCDB : UnaryVRRa<"vflcdb", 0xE7CC, fneg, v128db, v128db, 3, 0, 0>; def WFLCDB : UnaryVRRa<"wflcdb", 0xE7CC, fneg, v64db, v64db, 3, 8, 0>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFLCSB : UnaryVRRa<"vflcsb", 0xE7CC, fneg, v128sb, v128sb, 2, 0, 0>; + def WFLCSB : UnaryVRRa<"wflcsb", 0xE7CC, fneg, v32sb, v32sb, 2, 8, 0>; + def WFLCXB : UnaryVRRa<"wflcxb", 0xE7CC, fneg, v128xb, v128xb, 4, 8, 0>; + } // Load negative. def VFLNDB : UnaryVRRa<"vflndb", 0xE7CC, fnabs, v128db, v128db, 3, 0, 1>; def WFLNDB : UnaryVRRa<"wflndb", 0xE7CC, fnabs, v64db, v64db, 3, 8, 1>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFLNSB : UnaryVRRa<"vflnsb", 0xE7CC, fnabs, v128sb, v128sb, 2, 0, 1>; + def WFLNSB : UnaryVRRa<"wflnsb", 0xE7CC, fnabs, v32sb, v32sb, 2, 8, 1>; + def WFLNXB : UnaryVRRa<"wflnxb", 0xE7CC, fnabs, v128xb, v128xb, 4, 8, 1>; + } // Load positive. 
def VFLPDB : UnaryVRRa<"vflpdb", 0xE7CC, fabs, v128db, v128db, 3, 0, 2>; def WFLPDB : UnaryVRRa<"wflpdb", 0xE7CC, fabs, v64db, v64db, 3, 8, 2>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFLPSB : UnaryVRRa<"vflpsb", 0xE7CC, fabs, v128sb, v128sb, 2, 0, 2>; + def WFLPSB : UnaryVRRa<"wflpsb", 0xE7CC, fabs, v32sb, v32sb, 2, 8, 2>; + def WFLPXB : UnaryVRRa<"wflpxb", 0xE7CC, fabs, v128xb, v128xb, 4, 8, 2>; + } // Square root. def VFSQ : UnaryVRRaFloatGeneric<"vfsq", 0xE7CE>; def VFSQDB : UnaryVRRa<"vfsqdb", 0xE7CE, fsqrt, v128db, v128db, 3, 0>; def WFSQDB : UnaryVRRa<"wfsqdb", 0xE7CE, fsqrt, v64db, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFSQSB : UnaryVRRa<"vfsqsb", 0xE7CE, fsqrt, v128sb, v128sb, 2, 0>; + def WFSQSB : UnaryVRRa<"wfsqsb", 0xE7CE, fsqrt, v32sb, v32sb, 2, 8>; + def WFSQXB : UnaryVRRa<"wfsqxb", 0xE7CE, fsqrt, v128xb, v128xb, 4, 8>; + } // Subtract. def VFS : BinaryVRRcFloatGeneric<"vfs", 0xE7E2>; def VFSDB : BinaryVRRc<"vfsdb", 0xE7E2, fsub, v128db, v128db, 3, 0>; def WFSDB : BinaryVRRc<"wfsdb", 0xE7E2, fsub, v64db, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFSSB : BinaryVRRc<"vfssb", 0xE7E2, fsub, v128sb, v128sb, 2, 0>; + def WFSSB : BinaryVRRc<"wfssb", 0xE7E2, fsub, v32sb, v32sb, 2, 8>; + def WFSXB : BinaryVRRc<"wfsxb", 0xE7E2, fsub, v128xb, v128xb, 4, 8>; + } // Test data class immediate. let Defs = [CC] in { def VFTCI : BinaryVRIeFloatGeneric<"vftci", 0xE74A>; def VFTCIDB : BinaryVRIe<"vftcidb", 0xE74A, z_vftci, v128g, v128db, 3, 0>; def WFTCIDB : BinaryVRIe<"wftcidb", 0xE74A, null_frag, v64g, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + def VFTCISB : BinaryVRIe<"vftcisb", 0xE74A, z_vftci, v128f, v128sb, 2, 0>; + def WFTCISB : BinaryVRIe<"wftcisb", 0xE74A, null_frag, v32f, v32sb, 2, 8>; + def WFTCIXB : BinaryVRIe<"wftcixb", 0xE74A, null_frag, v128q, v128xb, 4, 8>; + } } } @@ -985,12 +1202,20 @@ let Predicates = [FeatureVector] in { let Defs = [CC] in { def WFC : CompareVRRaFloatGeneric<"wfc", 0xE7CB>; def WFCDB : CompareVRRa<"wfcdb", 0xE7CB, z_fcmp, v64db, 3>; + let Predicates = [FeatureVectorEnhancements1] in { + def WFCSB : CompareVRRa<"wfcsb", 0xE7CB, z_fcmp, v32sb, 2>; + def WFCXB : CompareVRRa<"wfcxb", 0xE7CB, z_fcmp, v128xb, 4>; + } } // Compare and signal scalar. let Defs = [CC] in { def WFK : CompareVRRaFloatGeneric<"wfk", 0xE7CA>; def WFKDB : CompareVRRa<"wfkdb", 0xE7CA, null_frag, v64db, 3>; + let Predicates = [FeatureVectorEnhancements1] in { + def WFKSB : CompareVRRa<"wfksb", 0xE7CA, null_frag, v32sb, 2>; + def WFKXB : CompareVRRa<"wfkxb", 0xE7CA, null_frag, v128xb, 4>; + } } // Compare equal. @@ -999,6 +1224,28 @@ let Predicates = [FeatureVector] in { v128g, v128db, 3, 0>; defm WFCEDB : BinaryVRRcSPair<"wfcedb", 0xE7E8, null_frag, null_frag, v64g, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + defm VFCESB : BinaryVRRcSPair<"vfcesb", 0xE7E8, z_vfcmpe, z_vfcmpes, + v128f, v128sb, 2, 0>; + defm WFCESB : BinaryVRRcSPair<"wfcesb", 0xE7E8, null_frag, null_frag, + v32f, v32sb, 2, 8>; + defm WFCEXB : BinaryVRRcSPair<"wfcexb", 0xE7E8, null_frag, null_frag, + v128q, v128xb, 4, 8>; + } + + // Compare and signal equal. 
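// Unlike the plain compare forms above, the "compare and signal" variants
// also raise an IEEE invalid-operation exception when either operand is a
// quiet NaN.  They are defined with null_frag here, so they are available
// to the assembler but are not selected during code generation.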
+ let Predicates = [FeatureVectorEnhancements1] in { + defm VFKEDB : BinaryVRRcSPair<"vfkedb", 0xE7E8, null_frag, null_frag, + v128g, v128db, 3, 4>; + defm WFKEDB : BinaryVRRcSPair<"wfkedb", 0xE7E8, null_frag, null_frag, + v64g, v64db, 3, 12>; + defm VFKESB : BinaryVRRcSPair<"vfkesb", 0xE7E8, null_frag, null_frag, + v128f, v128sb, 2, 4>; + defm WFKESB : BinaryVRRcSPair<"wfkesb", 0xE7E8, null_frag, null_frag, + v32f, v32sb, 2, 12>; + defm WFKEXB : BinaryVRRcSPair<"wfkexb", 0xE7E8, null_frag, null_frag, + v128q, v128xb, 4, 12>; + } // Compare high. def VFCH : BinaryVRRcSPairFloatGeneric<"vfch", 0xE7EB>; @@ -1006,6 +1253,28 @@ let Predicates = [FeatureVector] in { v128g, v128db, 3, 0>; defm WFCHDB : BinaryVRRcSPair<"wfchdb", 0xE7EB, null_frag, null_frag, v64g, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + defm VFCHSB : BinaryVRRcSPair<"vfchsb", 0xE7EB, z_vfcmph, z_vfcmphs, + v128f, v128sb, 2, 0>; + defm WFCHSB : BinaryVRRcSPair<"wfchsb", 0xE7EB, null_frag, null_frag, + v32f, v32sb, 2, 8>; + defm WFCHXB : BinaryVRRcSPair<"wfchxb", 0xE7EB, null_frag, null_frag, + v128q, v128xb, 4, 8>; + } + + // Compare and signal high. + let Predicates = [FeatureVectorEnhancements1] in { + defm VFKHDB : BinaryVRRcSPair<"vfkhdb", 0xE7EB, null_frag, null_frag, + v128g, v128db, 3, 4>; + defm WFKHDB : BinaryVRRcSPair<"wfkhdb", 0xE7EB, null_frag, null_frag, + v64g, v64db, 3, 12>; + defm VFKHSB : BinaryVRRcSPair<"vfkhsb", 0xE7EB, null_frag, null_frag, + v128f, v128sb, 2, 4>; + defm WFKHSB : BinaryVRRcSPair<"wfkhsb", 0xE7EB, null_frag, null_frag, + v32f, v32sb, 2, 12>; + defm WFKHXB : BinaryVRRcSPair<"wfkhxb", 0xE7EB, null_frag, null_frag, + v128q, v128xb, 4, 12>; + } // Compare high or equal. def VFCHE : BinaryVRRcSPairFloatGeneric<"vfche", 0xE7EA>; @@ -1013,6 +1282,28 @@ let Predicates = [FeatureVector] in { v128g, v128db, 3, 0>; defm WFCHEDB : BinaryVRRcSPair<"wfchedb", 0xE7EA, null_frag, null_frag, v64g, v64db, 3, 8>; + let Predicates = [FeatureVectorEnhancements1] in { + defm VFCHESB : BinaryVRRcSPair<"vfchesb", 0xE7EA, z_vfcmphe, z_vfcmphes, + v128f, v128sb, 2, 0>; + defm WFCHESB : BinaryVRRcSPair<"wfchesb", 0xE7EA, null_frag, null_frag, + v32f, v32sb, 2, 8>; + defm WFCHEXB : BinaryVRRcSPair<"wfchexb", 0xE7EA, null_frag, null_frag, + v128q, v128xb, 4, 8>; + } + + // Compare and signal high or equal. 
+ let Predicates = [FeatureVectorEnhancements1] in { + defm VFKHEDB : BinaryVRRcSPair<"vfkhedb", 0xE7EA, null_frag, null_frag, + v128g, v128db, 3, 4>; + defm WFKHEDB : BinaryVRRcSPair<"wfkhedb", 0xE7EA, null_frag, null_frag, + v64g, v64db, 3, 12>; + defm VFKHESB : BinaryVRRcSPair<"vfkhesb", 0xE7EA, null_frag, null_frag, + v128f, v128sb, 2, 4>; + defm WFKHESB : BinaryVRRcSPair<"wfkhesb", 0xE7EA, null_frag, null_frag, + v32f, v32sb, 2, 12>; + defm WFKHEXB : BinaryVRRcSPair<"wfkhexb", 0xE7EA, null_frag, null_frag, + v128q, v128xb, 4, 12>; + } } //===----------------------------------------------------------------------===// @@ -1024,36 +1315,49 @@ def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>; def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>; def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>; def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>; +def : Pat<(v16i8 (bitconvert (f128 VR128:$src))), (v16i8 VR128:$src)>; def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>; def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>; def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>; def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>; def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>; +def : Pat<(v8i16 (bitconvert (f128 VR128:$src))), (v8i16 VR128:$src)>; def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>; def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>; def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>; def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>; def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>; +def : Pat<(v4i32 (bitconvert (f128 VR128:$src))), (v4i32 VR128:$src)>; def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>; def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>; def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>; def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>; def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>; +def : Pat<(v2i64 (bitconvert (f128 VR128:$src))), (v2i64 VR128:$src)>; def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>; def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>; def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>; def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>; def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>; +def : Pat<(v4f32 (bitconvert (f128 VR128:$src))), (v4f32 VR128:$src)>; def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>; def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>; def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>; def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>; def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>; +def : Pat<(v2f64 (bitconvert (f128 VR128:$src))), (v2f64 VR128:$src)>; + +def : Pat<(f128 (bitconvert (v16i8 VR128:$src))), (f128 VR128:$src)>; +def : Pat<(f128 (bitconvert (v8i16 VR128:$src))), (f128 VR128:$src)>; +def : Pat<(f128 (bitconvert (v4i32 VR128:$src))), (f128 VR128:$src)>; +def : Pat<(f128 (bitconvert (v2i64 VR128:$src))), (f128 VR128:$src)>; +def : Pat<(f128 (bitconvert (v4f32 VR128:$src))), (f128 VR128:$src)>; +def : Pat<(f128 
(bitconvert (v2f64 VR128:$src))), (f128 VR128:$src)>; //===----------------------------------------------------------------------===// // Replicating scalars @@ -1130,6 +1434,20 @@ let AddedComplexity = 4 in { } //===----------------------------------------------------------------------===// +// Support for 128-bit floating-point values in vector registers +//===----------------------------------------------------------------------===// + +let Predicates = [FeatureVectorEnhancements1] in { + def : Pat<(f128 (load bdxaddr12only:$addr)), + (VL bdxaddr12only:$addr)>; + def : Pat<(store (f128 VR128:$src), bdxaddr12only:$addr), + (VST VR128:$src, bdxaddr12only:$addr)>; + + def : Pat<(f128 fpimm0), (VZERO)>; + def : Pat<(f128 fpimmneg0), (WFLNXB (VZERO))>; +} + +//===----------------------------------------------------------------------===// // String instructions //===----------------------------------------------------------------------===// @@ -1198,3 +1516,37 @@ let Predicates = [FeatureVector] in { defm VSTRCZF : QuaternaryOptVRRdSPair<"vstrczf", 0xE78A, int_s390_vstrczf, z_vstrcz_cc, v128f, v128f, 2, 2>; } + +//===----------------------------------------------------------------------===// +// Packed-decimal instructions +//===----------------------------------------------------------------------===// + +let Predicates = [FeatureVectorPackedDecimal] in { + def VLIP : BinaryVRIh<"vlip", 0xE649>; + + def VPKZ : BinaryVSI<"vpkz", 0xE634, null_frag, 0>; + def VUPKZ : StoreLengthVSI<"vupkz", 0xE63C, null_frag, 0>; + + let Defs = [CC] in { + def VCVB : BinaryVRRi<"vcvb", 0xE650, GR32>; + def VCVBG : BinaryVRRi<"vcvbg", 0xE652, GR64>; + def VCVD : TernaryVRIi<"vcvd", 0xE658, GR32>; + def VCVDG : TernaryVRIi<"vcvdg", 0xE65A, GR64>; + + def VAP : QuaternaryVRIf<"vap", 0xE671>; + def VSP : QuaternaryVRIf<"vsp", 0xE673>; + + def VMP : QuaternaryVRIf<"vmp", 0xE678>; + def VMSP : QuaternaryVRIf<"vmsp", 0xE679>; + + def VDP : QuaternaryVRIf<"vdp", 0xE67A>; + def VRP : QuaternaryVRIf<"vrp", 0xE67B>; + def VSDP : QuaternaryVRIf<"vsdp", 0xE67E>; + + def VSRP : QuaternaryVRIg<"vsrp", 0xE659>; + def VPSOP : QuaternaryVRIg<"vpsop", 0xE65B>; + + def VTP : TestVRRg<"vtp", 0xE65F>; + def VCP : CompareVRRh<"vcp", 0xE677>; + } +} diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp index ec8ce6e..d4cd89c 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZLDCleanup.cpp @@ -13,8 +13,8 @@ // //===----------------------------------------------------------------------===// -#include "SystemZTargetMachine.h" #include "SystemZMachineFunctionInfo.h" +#include "SystemZTargetMachine.h" #include "llvm/CodeGen/MachineDominators.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -127,7 +127,7 @@ MachineInstr *SystemZLDCleanup::ReplaceTLSCall(MachineInstr *I, return Copy; } -// Create a virtal register in *TLSBaseAddrReg, and populate it by +// Create a virtual register in *TLSBaseAddrReg, and populate it by // inserting a copy instruction after I. Returns the new instruction. 
MachineInstr *SystemZLDCleanup::SetRegister(MachineInstr *I, unsigned *TLSBaseAddrReg) { diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp index 14ff6af..791f033 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp @@ -53,15 +53,21 @@ // //===----------------------------------------------------------------------===// +#include "SystemZ.h" +#include "SystemZInstrInfo.h" #include "SystemZTargetMachine.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/IR/Function.h" -#include "llvm/Support/MathExtras.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/Support/ErrorHandling.h" +#include <cassert> +#include <cstdint> using namespace llvm; @@ -70,72 +76,72 @@ using namespace llvm; STATISTIC(LongBranches, "Number of long branches."); namespace { + // Represents positional information about a basic block. struct MBBInfo { // The address that we currently assume the block has. - uint64_t Address; + uint64_t Address = 0; // The size of the block in bytes, excluding terminators. // This value never changes. - uint64_t Size; + uint64_t Size = 0; // The minimum alignment of the block, as a log2 value. // This value never changes. - unsigned Alignment; + unsigned Alignment = 0; // The number of terminators in this block. This value never changes. - unsigned NumTerminators; + unsigned NumTerminators = 0; - MBBInfo() - : Address(0), Size(0), Alignment(0), NumTerminators(0) {} + MBBInfo() = default; }; // Represents the state of a block terminator. struct TerminatorInfo { // If this terminator is a relaxable branch, this points to the branch // instruction, otherwise it is null. - MachineInstr *Branch; + MachineInstr *Branch = nullptr; // The address that we currently assume the terminator has. - uint64_t Address; + uint64_t Address = 0; // The current size of the terminator in bytes. - uint64_t Size; + uint64_t Size = 0; // If Branch is nonnull, this is the number of the target block, // otherwise it is unused. - unsigned TargetBlock; + unsigned TargetBlock = 0; // If Branch is nonnull, this is the length of the longest relaxed form, // otherwise it is zero. - unsigned ExtraRelaxSize; + unsigned ExtraRelaxSize = 0; - TerminatorInfo() : Branch(nullptr), Size(0), TargetBlock(0), - ExtraRelaxSize(0) {} + TerminatorInfo() = default; }; // Used to keep track of the current position while iterating over the blocks. struct BlockPosition { // The address that we assume this position has. - uint64_t Address; + uint64_t Address = 0; // The number of low bits in Address that are known to be the same // as the runtime address. 
unsigned KnownBits; - BlockPosition(unsigned InitialAlignment) - : Address(0), KnownBits(InitialAlignment) {} + BlockPosition(unsigned InitialAlignment) : KnownBits(InitialAlignment) {} }; class SystemZLongBranch : public MachineFunctionPass { public: static char ID; + SystemZLongBranch(const SystemZTargetMachine &tm) - : MachineFunctionPass(ID), TII(nullptr) {} + : MachineFunctionPass(ID) {} StringRef getPassName() const override { return "SystemZ Long Branch"; } bool runOnMachineFunction(MachineFunction &F) override; + MachineFunctionProperties getRequiredProperties() const override { return MachineFunctionProperties().set( MachineFunctionProperties::Property::NoVRegs); @@ -155,7 +161,7 @@ private: void relaxBranch(TerminatorInfo &Terminator); void relaxBranches(); - const SystemZInstrInfo *TII; + const SystemZInstrInfo *TII = nullptr; MachineFunction *MF; SmallVector<MBBInfo, 16> MBBs; SmallVector<TerminatorInfo, 16> Terminators; @@ -165,11 +171,8 @@ char SystemZLongBranch::ID = 0; const uint64_t MaxBackwardRange = 0x10000; const uint64_t MaxForwardRange = 0xfffe; -} // end anonymous namespace -FunctionPass *llvm::createSystemZLongBranchPass(SystemZTargetMachine &TM) { - return new SystemZLongBranch(TM); -} +} // end anonymous namespace // Position describes the state immediately before Block. Update Block // accordingly and move Position to the end of the block's non-terminator @@ -354,13 +357,13 @@ void SystemZLongBranch::splitBranchOnCount(MachineInstr *MI, MachineBasicBlock *MBB = MI->getParent(); DebugLoc DL = MI->getDebugLoc(); BuildMI(*MBB, MI, DL, TII->get(AddOpcode)) - .addOperand(MI->getOperand(0)) - .addOperand(MI->getOperand(1)) - .addImm(-1); + .add(MI->getOperand(0)) + .add(MI->getOperand(1)) + .addImm(-1); MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL)) - .addImm(SystemZ::CCMASK_ICMP) - .addImm(SystemZ::CCMASK_CMP_NE) - .addOperand(MI->getOperand(2)); + .addImm(SystemZ::CCMASK_ICMP) + .addImm(SystemZ::CCMASK_CMP_NE) + .add(MI->getOperand(2)); // The implicit use of CC is a killing use. BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo()); MI->eraseFromParent(); @@ -373,12 +376,12 @@ void SystemZLongBranch::splitCompareBranch(MachineInstr *MI, MachineBasicBlock *MBB = MI->getParent(); DebugLoc DL = MI->getDebugLoc(); BuildMI(*MBB, MI, DL, TII->get(CompareOpcode)) - .addOperand(MI->getOperand(0)) - .addOperand(MI->getOperand(1)); + .add(MI->getOperand(0)) + .add(MI->getOperand(1)); MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL)) - .addImm(SystemZ::CCMASK_ICMP) - .addOperand(MI->getOperand(2)) - .addOperand(MI->getOperand(3)); + .addImm(SystemZ::CCMASK_ICMP) + .add(MI->getOperand(2)) + .add(MI->getOperand(3)); // The implicit use of CC is a killing use. 
BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo()); MI->eraseFromParent(); @@ -463,3 +466,7 @@ bool SystemZLongBranch::runOnMachineFunction(MachineFunction &F) { relaxBranches(); return true; } + +FunctionPass *llvm::createSystemZLongBranchPass(SystemZTargetMachine &TM) { + return new SystemZLongBranch(TM); +} diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp index ab6020f..8342463 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZMachineScheduler.cpp @@ -18,12 +18,12 @@ using namespace llvm; -#define DEBUG_TYPE "misched" +#define DEBUG_TYPE "machine-scheduler" #ifndef NDEBUG // Print the set of SUs void SystemZPostRASchedStrategy::SUSet:: -dump(SystemZHazardRecognizer &HazardRec) { +dump(SystemZHazardRecognizer &HazardRec) const { dbgs() << "{"; for (auto &SU : *this) { HazardRec.dumpSU(SU, dbgs()); diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMachineScheduler.h b/contrib/llvm/lib/Target/SystemZ/SystemZMachineScheduler.h index b919758..3dfef38 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZMachineScheduler.h +++ b/contrib/llvm/lib/Target/SystemZ/SystemZMachineScheduler.h @@ -1,4 +1,4 @@ -//==-- SystemZMachineScheduler.h - SystemZ Scheduler Interface -*- C++ -*---==// +//==- SystemZMachineScheduler.h - SystemZ Scheduler Interface ----*- C++ -*-==// // // The LLVM Compiler Infrastructure // @@ -14,10 +14,10 @@ // usage of processor resources. //===----------------------------------------------------------------------===// -#include "SystemZInstrInfo.h" #include "SystemZHazardRecognizer.h" #include "llvm/CodeGen/MachineScheduler.h" -#include "llvm/Support/Debug.h" +#include "llvm/CodeGen/ScheduleDAG.h" +#include <set> #ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZMACHINESCHEDULER_H #define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZMACHINESCHEDULER_H @@ -28,29 +28,29 @@ namespace llvm { /// A MachineSchedStrategy implementation for SystemZ post RA scheduling. class SystemZPostRASchedStrategy : public MachineSchedStrategy { - ScheduleDAGMI *DAG; + ScheduleDAGMI *DAG; /// A candidate during instruction evaluation. struct Candidate { - SUnit *SU; + SUnit *SU = nullptr; /// The decoding cost. - int GroupingCost; + int GroupingCost = 0; /// The processor resources cost. - int ResourcesCost; + int ResourcesCost = 0; - Candidate() : SU(nullptr), GroupingCost(0), ResourcesCost(0) {} + Candidate() = default; Candidate(SUnit *SU_, SystemZHazardRecognizer &HazardRec); // Compare two candidates. bool operator<(const Candidate &other); // Check if this node is free of cost ("as good as any"). - bool inline noCost() { + bool noCost() const { return (GroupingCost <= 0 && !ResourcesCost); } - }; + }; // A sorter for the Available set that makes sure that SUs are considered // in the best order. @@ -72,7 +72,7 @@ class SystemZPostRASchedStrategy : public MachineSchedStrategy { // A set of SUs with a sorter and dump method. struct SUSet : std::set<SUnit*, SUSorter> { #ifndef NDEBUG - void dump(SystemZHazardRecognizer &HazardRec); + void dump(SystemZHazardRecognizer &HazardRec) const; #endif }; @@ -83,7 +83,7 @@ class SystemZPostRASchedStrategy : public MachineSchedStrategy { // region. SystemZHazardRecognizer HazardRec; - public: +public: SystemZPostRASchedStrategy(const MachineSchedContext *C); /// PostRA scheduling does not track pressure. 
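The SystemZLongBranch.cpp and SystemZMachineScheduler.h hunks above all apply the same cleanup: hand-written constructors that zeroed members of MBBInfo, TerminatorInfo, BlockPosition and Candidate are replaced by C++11 in-class default member initializers plus a defaulted constructor. A minimal standalone sketch of that idiom, using a hypothetical struct rather than the actual pass data structures:

#include <cstdint>

// Hypothetical illustration of the refactoring pattern; not code from the patch.
struct BlockInfoSketch {
  // Each member now carries its default at the point of declaration...
  uint64_t Address = 0;        // assumed address of the block
  uint64_t Size = 0;           // size in bytes, excluding terminators
  unsigned Alignment = 0;      // minimum alignment, as a log2 value
  unsigned NumTerminators = 0; // number of terminators in the block

  // ...so the old member-initializer-list constructor can simply be defaulted.
  BlockInfoSketch() = default;
};

int main() {
  BlockInfoSketch Info; // default-constructed: all members start at zero
  return static_cast<int>(Info.Address + Info.Size + Info.Alignment +
                          Info.NumTerminators);
}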
@@ -107,6 +107,6 @@ class SystemZPostRASchedStrategy : public MachineSchedStrategy { void releaseBottomNode(SUnit *SU) override {}; }; -} // namespace llvm +} // end namespace llvm -#endif /* LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZMACHINESCHEDULER_H */ +#endif // LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZMACHINESCHEDULER_H diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td index 7bb4fe5..7136121 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td @@ -531,6 +531,7 @@ def BDAddr64Disp12 : AddressAsmOperand<"BDAddr", "64", "12">; def BDAddr64Disp20 : AddressAsmOperand<"BDAddr", "64", "20">; def BDXAddr64Disp12 : AddressAsmOperand<"BDXAddr", "64", "12">; def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">; +def BDLAddr64Disp12Len4 : AddressAsmOperand<"BDLAddr", "64", "12", "Len4">; def BDLAddr64Disp12Len8 : AddressAsmOperand<"BDLAddr", "64", "12", "Len8">; def BDRAddr64Disp12 : AddressAsmOperand<"BDRAddr", "64", "12">; def BDVAddr64Disp12 : AddressAsmOperand<"BDVAddr", "64", "12">; @@ -578,6 +579,7 @@ def bdxaddr20pair : BDXMode<"BDXAddr", "64", "20", "Pair">; def dynalloc12only : BDXMode<"DynAlloc", "64", "12", "Only">; def laaddr12pair : BDXMode<"LAAddr", "64", "12", "Pair">; def laaddr20pair : BDXMode<"LAAddr", "64", "20", "Pair">; +def bdladdr12onlylen4 : BDLMode<"BDLAddr", "64", "12", "Only", "4">; def bdladdr12onlylen8 : BDLMode<"BDLAddr", "64", "12", "Only", "8">; def bdraddr12only : BDRMode<"BDRAddr", "64", "12", "Only">; def bdvaddr12only : BDVMode< "64", "12">; diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td index fde26ed..759a8bb 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td @@ -10,7 +10,8 @@ //===----------------------------------------------------------------------===// // Type profiles //===----------------------------------------------------------------------===// -def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i64>]>; +def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i64>, + SDTCisVT<1, i64>]>; def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i64>, SDTCisVT<1, i64>]>; def SDT_ZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; @@ -35,14 +36,10 @@ def SDT_ZWrapOffset : SDTypeProfile<1, 2, SDTCisSameAs<0, 2>, SDTCisPtrTy<0>]>; def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>; -def SDT_ZGR128Binary32 : SDTypeProfile<1, 2, +def SDT_ZGR128Binary : SDTypeProfile<1, 2, [SDTCisVT<0, untyped>, - SDTCisVT<1, untyped>, - SDTCisVT<2, i32>]>; -def SDT_ZGR128Binary64 : SDTypeProfile<1, 2, - [SDTCisVT<0, untyped>, - SDTCisVT<1, untyped>, - SDTCisVT<2, i64>]>; + SDTCisInt<1>, + SDTCisInt<2>]>; def SDT_ZAtomicLoadBinaryW : SDTypeProfile<1, 5, [SDTCisVT<0, i32>, SDTCisPtrTy<1>, @@ -184,14 +181,11 @@ def z_select_ccmask : SDNode<"SystemZISD::SELECT_CCMASK", SDT_ZSelectCCMask, [SDNPInGlue]>; def z_adjdynalloc : SDNode<"SystemZISD::ADJDYNALLOC", SDT_ZAdjDynAlloc>; def z_popcnt : SDNode<"SystemZISD::POPCNT", SDTIntUnaryOp>; -def z_umul_lohi64 : SDNode<"SystemZISD::UMUL_LOHI64", SDT_ZGR128Binary64>; -def z_sdivrem32 : SDNode<"SystemZISD::SDIVREM32", SDT_ZGR128Binary32>; -def z_sdivrem64 : SDNode<"SystemZISD::SDIVREM64", SDT_ZGR128Binary64>; -def z_udivrem32 : SDNode<"SystemZISD::UDIVREM32", SDT_ZGR128Binary32>; -def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>; - -def z_serialize : 
SDNode<"SystemZISD::SERIALIZE", SDTNone, - [SDNPHasChain, SDNPMayStore]>; +def z_smul_lohi : SDNode<"SystemZISD::SMUL_LOHI", SDT_ZGR128Binary>; +def z_umul_lohi : SDNode<"SystemZISD::UMUL_LOHI", SDT_ZGR128Binary>; +def z_sdivrem : SDNode<"SystemZISD::SDIVREM", SDT_ZGR128Binary>; +def z_udivrem : SDNode<"SystemZISD::UDIVREM", SDT_ZGR128Binary>; + def z_membarrier : SDNode<"SystemZISD::MEMBARRIER", SDTNone, [SDNPHasChain, SDNPSideEffect]>; @@ -556,6 +550,12 @@ def z_fma : PatFrag<(ops node:$src1, node:$src2, node:$src3), def z_fms : PatFrag<(ops node:$src1, node:$src2, node:$src3), (fma node:$src2, node:$src3, (fneg node:$src1))>; +// Negative fused multiply-add and multiply-subtract. +def fnma : PatFrag<(ops node:$src1, node:$src2, node:$src3), + (fneg (fma node:$src1, node:$src2, node:$src3))>; +def fnms : PatFrag<(ops node:$src1, node:$src2, node:$src3), + (fneg (fms node:$src1, node:$src2, node:$src3))>; + // Floating-point negative absolute. def fnabs : PatFrag<(ops node:$ptr), (fneg (fabs node:$ptr))>; @@ -631,6 +631,19 @@ def z_vllezf64 : PatFrag<(ops node:$addr), (scalar_to_vector (f64 (load node:$addr))), (z_vzero))>; +// Similarly for the high element of a zeroed vector. +def z_vllezli32 : z_vllez<i32, load, 0>; +def z_vllezlf32 : PatFrag<(ops node:$addr), + (bitconvert + (z_merge_high + (v2i64 + (bitconvert + (z_merge_high + (v4f32 (scalar_to_vector + (f32 (load node:$addr)))), + (v4f32 (z_vzero))))), + (v2i64 (z_vzero))))>; + // Store one element of a vector. class z_vste<ValueType scalartype, SDPatternOperator store> : PatFrag<(ops node:$vec, node:$addr, node:$index), diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td b/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td index 16a7ed7..152521f 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td @@ -167,3 +167,10 @@ class FPConversion<Instruction insn, SDPatternOperator operator, TypedReg tr1, TypedReg tr2, bits<3> suppress, bits<4> mode> : Pat<(tr1.vt (operator (tr2.vt tr2.op:$vec))), (insn tr2.op:$vec, suppress, mode)>; + +// Use INSN to perform mininum/maximum operation OPERATOR on type TR. +// FUNCTION is the type of minimum/maximum function to perform. 
+class FPMinMax<Instruction insn, SDPatternOperator operator, TypedReg tr, + bits<4> function> + : Pat<(tr.vt (operator (tr.vt tr.op:$vec1), (tr.vt tr.op:$vec2))), + (insn tr.op:$vec1, tr.op:$vec2, function)>; diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td b/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td index 1cdc094..0dca458 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td @@ -33,3 +33,6 @@ def : ProcessorModel<"zEC12", ZEC12Model, Arch10SupportedFeatures.List>; def : ProcessorModel<"arch11", Z13Model, Arch11SupportedFeatures.List>; def : ProcessorModel<"z13", Z13Model, Arch11SupportedFeatures.List>; +def : ProcessorModel<"arch12", Z14Model, Arch12SupportedFeatures.List>; +def : ProcessorModel<"z14", Z14Model, Arch12SupportedFeatures.List>; + diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp index 6ef8000..d14a0fb 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp @@ -7,8 +7,8 @@ // //===----------------------------------------------------------------------===// -#include "SystemZInstrInfo.h" #include "SystemZRegisterInfo.h" +#include "SystemZInstrInfo.h" #include "SystemZSubtarget.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td index 47d2f75..52ba1a5 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td @@ -260,10 +260,10 @@ defm VF128 : SystemZRegClass<"VF128", // All vector registers. defm VR128 : SystemZRegClass<"VR128", - [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128, - (add (sequence "V%u", 0, 7), - (sequence "V%u", 16, 31), - (sequence "V%u", 8, 15))>; + [f128, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], + 128, (add (sequence "V%u", 0, 7), + (sequence "V%u", 16, 31), + (sequence "V%u", 8, 15))>; // Attaches a ValueType to a register operand, to make the instruction // definitions easier. @@ -272,7 +272,8 @@ class TypedReg<ValueType vtin, RegisterOperand opin> { RegisterOperand op = opin; } -def v32eb : TypedReg<f32, VR32>; +def v32f : TypedReg<i32, VR32>; +def v32sb : TypedReg<f32, VR32>; def v64g : TypedReg<i64, VR64>; def v64db : TypedReg<f64, VR64>; def v128b : TypedReg<v16i8, VR128>; @@ -280,8 +281,9 @@ def v128h : TypedReg<v8i16, VR128>; def v128f : TypedReg<v4i32, VR128>; def v128g : TypedReg<v2i64, VR128>; def v128q : TypedReg<v16i8, VR128>; -def v128eb : TypedReg<v4f32, VR128>; +def v128sb : TypedReg<v4f32, VR128>; def v128db : TypedReg<v2f64, VR128>; +def v128xb : TypedReg<f128, VR128>; def v128any : TypedReg<untyped, VR128>; //===----------------------------------------------------------------------===// @@ -304,3 +306,13 @@ foreach I = 0-15 in { defm AR32 : SystemZRegClass<"AR32", [i32], 32, (add (sequence "A%u", 0, 15)), 0>; +// Control registers. 
+class CREG64<bits<16> num, string n> : SystemZReg<n> { + let HWEncoding = num; +} +foreach I = 0-15 in { + def C#I : CREG64<I, "c"#I>, DwarfRegNum<[!add(I, 32)]>; +} +defm CR64 : SystemZRegClass<"CR64", [i64], 64, + (add (sequence "C%u", 0, 15)), 0>; + diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSchedule.td b/contrib/llvm/lib/Target/SystemZ/SystemZSchedule.td index dbba8ab..8dba89f 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZSchedule.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZSchedule.td @@ -56,12 +56,16 @@ def LSU_lat1 : SchedWrite; // Floating point unit (zEC12 and earlier) def FPU : SchedWrite; def FPU2 : SchedWrite; +def DFU : SchedWrite; +def DFU2 : SchedWrite; -// Vector sub units (z13) +// Vector sub units (z13 and later) def VecBF : SchedWrite; def VecBF2 : SchedWrite; def VecDF : SchedWrite; def VecDF2 : SchedWrite; +def VecDFX : SchedWrite; +def VecDFX2 : SchedWrite; def VecFPd : SchedWrite; // Blocking BFP div/sqrt unit. def VecMul : SchedWrite; def VecStr : SchedWrite; @@ -71,6 +75,7 @@ def VecXsPm : SchedWrite; def VBU : SchedWrite; +include "SystemZScheduleZ14.td" include "SystemZScheduleZ13.td" include "SystemZScheduleZEC12.td" include "SystemZScheduleZ196.td" diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ13.td b/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ13.td index e97d61d..72543c1 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ13.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ13.td @@ -15,7 +15,7 @@ def Z13Model : SchedMachineModel { let UnsupportedFeatures = Arch11UnsupportedFeatures.List; - + let IssueWidth = 8; let MicroOpBufferSize = 60; // Issue queues let LoadLatency = 1; // Optimistic load latency. @@ -76,6 +76,8 @@ def : WriteRes<VecBF, [Z13_VecUnit]> { let Latency = 8; } def : WriteRes<VecBF2, [Z13_VecUnit, Z13_VecUnit]> { let Latency = 9; } def : WriteRes<VecDF, [Z13_VecUnit]> { let Latency = 8; } def : WriteRes<VecDF2, [Z13_VecUnit, Z13_VecUnit]> { let Latency = 9; } +def : WriteRes<VecDFX, [Z13_VecUnit]> { let Latency = 1; } +def : WriteRes<VecDFX2, [Z13_VecUnit, Z13_VecUnit]> { let Latency = 2; } def : WriteRes<VecFPd, [Z13_VecFPdUnit, Z13_VecFPdUnit, Z13_VecFPdUnit, Z13_VecFPdUnit, Z13_VecFPdUnit, Z13_VecFPdUnit, Z13_VecFPdUnit, Z13_VecFPdUnit, Z13_VecFPdUnit, @@ -157,7 +159,7 @@ def : InstRW<[FXb], (instregex "CondReturn$")>; // Select instructions //===----------------------------------------------------------------------===// -// Select pseudo +// Select pseudo def : InstRW<[FXa], (instregex "Select(32|64|32Mux)$")>; // CondStore pseudos @@ -179,6 +181,7 @@ def : InstRW<[FXb, LSU, Lat5], (instregex "MVI(Y)?$")>; // Move character def : InstRW<[FXb, LSU, LSU, LSU, Lat8, GroupAlone], (instregex "MVC$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCL(E|U)?$")>; // Pseudo -> reg move def : InstRW<[FXa], (instregex "COPY(_TO_REGCLASS)?$")>; @@ -223,7 +226,7 @@ def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVST$")>; def : InstRW<[FXa, Lat2], (instregex "LOCRMux$")>; def : InstRW<[FXa, Lat2], (instregex "LOC(G|FH)?R(Asm.*)?$")>; -def : InstRW<[FXa, Lat2], (instregex "LOC(G|H)?HI(Asm.*)?$")>; +def : InstRW<[FXa, Lat2], (instregex "LOC(G|H)?HI(Mux|(Asm.*))?$")>; def : InstRW<[FXa, LSU, Lat6], (instregex "LOC(G|FH|Mux)?(Asm.*)?$")>; def : InstRW<[FXb, LSU, Lat5], (instregex "STOC(G|FH|Mux)?(Asm.*)?$")>; @@ -268,6 +271,7 @@ def : InstRW<[FXb, LSU, Lat5], (instregex "LLG(F|T)?AT$")>; def : InstRW<[FXb, LSU, Lat5], (instregex "STC(H|Y|Mux)?$")>; def : InstRW<[FXb, LSU, Lat5], (instregex 
"STH(H|Y|RL|Mux)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "STCM(H|Y)?$")>; //===----------------------------------------------------------------------===// // Multi-register moves @@ -277,6 +281,9 @@ def : InstRW<[FXb, LSU, Lat5], (instregex "STH(H|Y|RL|Mux)?$")>; def : InstRW<[LSU, LSU, LSU, LSU, LSU, Lat10, GroupAlone], (instregex "LM(H|Y|G)?$")>; +// Load multiple disjoint +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "LMD$")>; + // Store multiple (estimated average of ceil(5/2) FXb ops) def : InstRW<[LSU, LSU, FXb, FXb, FXb, Lat10, GroupAlone], (instregex "STM(G|H|Y)?$")>; @@ -288,6 +295,7 @@ def : InstRW<[LSU, LSU, FXb, FXb, FXb, Lat10, def : InstRW<[FXa], (instregex "LRV(G)?R$")>; def : InstRW<[FXa, LSU, Lat5], (instregex "LRV(G|H)?$")>; def : InstRW<[FXb, LSU, Lat5], (instregex "STRV(G|H)?$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCIN$")>; //===----------------------------------------------------------------------===// // Load address instructions @@ -345,7 +353,10 @@ def : InstRW<[FXa], (instregex "ALGF(I|R)$")>; def : InstRW<[FXa], (instregex "ALGR(K)?$")>; def : InstRW<[FXa], (instregex "ALR(K)?$")>; def : InstRW<[FXa], (instregex "AR(K)?$")>; -def : InstRW<[FXb, LSU, Lat5], (instregex "A(G)?SI$")>; +def : InstRW<[FXa], (instregex "A(L)?HHHR$")>; +def : InstRW<[FXa, Lat2], (instregex "A(L)?HHLR$")>; +def : InstRW<[FXa], (instregex "ALSIH(N)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "A(L)?(G)?SI$")>; // Logical addition with carry def : InstRW<[FXa, LSU, Lat6, GroupAlone], (instregex "ALC(G)?$")>; @@ -368,6 +379,8 @@ def : InstRW<[FXa], (instregex "SLGF(I|R)$")>; def : InstRW<[FXa], (instregex "SLGR(K)?$")>; def : InstRW<[FXa], (instregex "SLR(K)?$")>; def : InstRW<[FXa], (instregex "SR(K)?$")>; +def : InstRW<[FXa], (instregex "S(L)?HHHR$")>; +def : InstRW<[FXa, Lat2], (instregex "S(L)?HHLR$")>; // Subtraction with borrow def : InstRW<[FXa, LSU, Lat6, GroupAlone], (instregex "SLB(G)?$")>; @@ -433,18 +446,22 @@ def : InstRW<[FXa, Lat6], (instregex "MS(R|FI)$")>; def : InstRW<[FXa, LSU, Lat12], (instregex "MSG$")>; def : InstRW<[FXa, Lat8], (instregex "MSGR$")>; def : InstRW<[FXa, Lat6], (instregex "MSGF(I|R)$")>; -def : InstRW<[FXa, LSU, Lat15, GroupAlone], (instregex "MLG$")>; -def : InstRW<[FXa, Lat9, GroupAlone], (instregex "MLGR$")>; +def : InstRW<[FXa2, LSU, Lat15, GroupAlone], (instregex "MLG$")>; +def : InstRW<[FXa2, Lat9, GroupAlone], (instregex "MLGR$")>; def : InstRW<[FXa, Lat5], (instregex "MGHI$")>; def : InstRW<[FXa, Lat5], (instregex "MHI$")>; def : InstRW<[FXa, LSU, Lat9], (instregex "MH(Y)?$")>; +def : InstRW<[FXa2, Lat7, GroupAlone], (instregex "M(L)?R$")>; +def : InstRW<[FXa2, LSU, Lat7, GroupAlone], (instregex "M(FY|L)?$")>; //===----------------------------------------------------------------------===// // Division and remainder //===----------------------------------------------------------------------===// -def : InstRW<[FXa, Lat30, GroupAlone], (instregex "DSG(F)?R$")>; -def : InstRW<[LSU, FXa, Lat30, GroupAlone], (instregex "DSG(F)?$")>; +def : InstRW<[FXa2, FXa2, Lat20, GroupAlone], (instregex "DR$")>; +def : InstRW<[FXa2, FXa2, LSU, Lat30, GroupAlone], (instregex "D$")>; +def : InstRW<[FXa2, Lat30, GroupAlone], (instregex "DSG(F)?R$")>; +def : InstRW<[LSU, FXa2, Lat30, GroupAlone], (instregex "DSG(F)?$")>; def : InstRW<[FXa2, FXa2, Lat20, GroupAlone], (instregex "DLR$")>; def : InstRW<[FXa2, FXa2, Lat30, GroupAlone], (instregex "DLGR$")>; def : InstRW<[FXa2, FXa2, LSU, Lat30, GroupAlone], (instregex "DL(G)?$")>; @@ 
-456,7 +473,9 @@ def : InstRW<[FXa2, FXa2, LSU, Lat30, GroupAlone], (instregex "DL(G)?$")>; def : InstRW<[FXa], (instregex "SLL(G|K)?$")>; def : InstRW<[FXa], (instregex "SRL(G|K)?$")>; def : InstRW<[FXa], (instregex "SRA(G|K)?$")>; -def : InstRW<[FXa], (instregex "SLA(K)?$")>; +def : InstRW<[FXa], (instregex "SLA(G|K)?$")>; +def : InstRW<[FXa, FXa, FXa, FXa, LSU, Lat8, GroupAlone], + (instregex "S(L|R)D(A|L)$")>; // Rotate def : InstRW<[FXa, LSU, Lat6], (instregex "RLL(G)?$")>; @@ -493,6 +512,8 @@ def : InstRW<[FXb], (instregex "CLIH$")>; def : InstRW<[FXb, LSU, Lat5], (instregex "CLI(Y)?$")>; def : InstRW<[FXb], (instregex "CLR$")>; def : InstRW<[FXb, LSU, Lat5], (instregex "CLRL$")>; +def : InstRW<[FXb], (instregex "C(L)?HHR$")>; +def : InstRW<[FXb, Lat2], (instregex "C(L)?HLR$")>; // Compare halfword def : InstRW<[FXb, LSU, Lat6], (instregex "CH(Y|RL)?$")>; @@ -505,7 +526,7 @@ def : InstRW<[FXb, Lat2], (instregex "CGFR$")>; // Compare logical character def : InstRW<[FXb, LSU, LSU, Lat9, BeginGroup], (instregex "CLC$")>; - +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CLCL(E|U)?$")>; def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CLST$")>; // Test under mask @@ -516,6 +537,9 @@ def : InstRW<[FXb], (instregex "TMHL(64)?$")>; def : InstRW<[FXb], (instregex "TMLH(64)?$")>; def : InstRW<[FXb], (instregex "TMLL(64)?$")>; +// Compare logical characters under mask +def : InstRW<[FXb, LSU, Lat6], (instregex "CLM(H|Y)?$")>; + //===----------------------------------------------------------------------===// // Prefetch and execution hint //===----------------------------------------------------------------------===// @@ -550,7 +574,7 @@ def : InstRW<[FXa, FXa, FXb, FXb, LSU, FXb, FXb, LSU, LSU, Lat20, GroupAlone], (instregex "CDSG$")>; // Compare and swap and store -def : InstRW<[FXa, Lat30, GroupAlone], (instregex "CSST$")>; +def : InstRW<[FXa, LSU, Lat30], (instregex "CSST$")>; // Perform locked operation def : InstRW<[LSU, Lat30, GroupAlone], (instregex "PLO$")>; @@ -563,6 +587,51 @@ def : InstRW<[FXb, FXb, LSU, Lat6, GroupAlone], (instregex "STPQ$")>; def : InstRW<[LSU, LSU, Lat5, GroupAlone], (instregex "LPD(G)?$")>; //===----------------------------------------------------------------------===// +// Translate and convert +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "TR$")>; +def : InstRW<[FXa, FXa, FXa, LSU, LSU, Lat30, GroupAlone], (instregex "TRT$")>; +def : InstRW<[FXa, LSU, Lat30], (instregex "TRTR$")>; +def : InstRW<[FXa, Lat30], (instregex "TR(TR)?(T)?(E|EOpt)?$")>; +def : InstRW<[LSU, Lat30], (instregex "TR(T|O)(T|O)(Opt)?$")>; +def : InstRW<[FXa, Lat30], (instregex "CU(12|14|21|24|41|42)(Opt)?$")>; +def : InstRW<[FXa, Lat30], (instregex "(CUUTF|CUTFU)(Opt)?$")>; + +//===----------------------------------------------------------------------===// +// Message-security assist +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, Lat30], (instregex "KM(C|F|O|CTR)?$")>; +def : InstRW<[FXa, Lat30], (instregex "(KIMD|KLMD|KMAC|PCC|PPNO)$")>; + +//===----------------------------------------------------------------------===// +// Decimal arithmetic +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, VecDF, VecDF, LSU, LSU, Lat30, GroupAlone], + (instregex "CVBG$")>; +def : InstRW<[FXb, VecDF, LSU, Lat30, GroupAlone], (instregex "CVB(Y)?$")>; +def : InstRW<[FXb, FXb, FXb, VecDF2, VecDF2, LSU, 
Lat30, GroupAlone], + (instregex "CVDG$")>; +def : InstRW<[FXb, VecDF, FXb, LSU, Lat30, GroupAlone], (instregex "CVD(Y)?$")>; +def : InstRW<[LSU, Lat10, GroupAlone], (instregex "MVO$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MV(N|Z)$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "(PACK|PKA|PKU)$")>; +def : InstRW<[LSU, Lat12, GroupAlone], (instregex "UNPK(A|U)$")>; +def : InstRW<[FXb, LSU, LSU, Lat9, BeginGroup], (instregex "UNPK$")>; + +def : InstRW<[FXb, VecDFX, LSU, LSU, LSU, Lat9, GroupAlone], + (instregex "(A|S|ZA)P$")>; +def : InstRW<[FXb, VecDFX2, VecDFX2, LSU, LSU, LSU, Lat30, GroupAlone], + (instregex "(M|D)P$")>; +def : InstRW<[FXb, VecDFX, VecDFX, LSU, LSU, Lat15, GroupAlone], + (instregex "SRP$")>; +def : InstRW<[VecDFX, LSU, LSU, Lat5, GroupAlone], (instregex "CP$")>; +def : InstRW<[VecDFX, LSU, Lat4, BeginGroup], (instregex "TP$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "ED(MK)?$")>; + +//===----------------------------------------------------------------------===// // Access registers //===----------------------------------------------------------------------===// @@ -629,41 +698,29 @@ def : InstRW<[FXb], (instregex "PPA$")>; //===----------------------------------------------------------------------===// // Find leftmost one -def : InstRW<[FXa, Lat6, GroupAlone], (instregex "FLOGR$")>; +def : InstRW<[FXa, FXa, Lat6, GroupAlone], (instregex "FLOGR$")>; // Population count def : InstRW<[FXa, Lat3], (instregex "POPCNT$")>; // Extend -def : InstRW<[FXa], (instregex "AEXT128_64$")>; -def : InstRW<[FXa], (instregex "ZEXT128_(32|64)$")>; +def : InstRW<[FXa], (instregex "AEXT128$")>; +def : InstRW<[FXa], (instregex "ZEXT128$")>; // String instructions def : InstRW<[FXa, LSU, Lat30], (instregex "SRST$")>; +def : InstRW<[FXa, Lat30], (instregex "SRSTU$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CUSE$")>; -// Move with key -def : InstRW<[FXa, FXa, FXb, LSU, Lat8, GroupAlone], (instregex "MVCK$")>; - -// Extract CPU Time -def : InstRW<[FXa, Lat5, LSU], (instregex "ECTG$")>; +// Various complex instructions +def : InstRW<[LSU, Lat30], (instregex "CFC$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "UPT$")>; +def : InstRW<[LSU, Lat30], (instregex "CKSM$")>; +def : InstRW<[FXa, Lat30], (instregex "CMPSC$")>; // Execute def : InstRW<[FXb, GroupAlone], (instregex "EX(RL)?$")>; -// Program return -def : InstRW<[FXb, Lat30], (instregex "PR$")>; - -// Inline assembly -def : InstRW<[LSU, LSU, LSU, FXa, FXa, FXb, Lat9, GroupAlone], - (instregex "STCK(F)?$")>; -def : InstRW<[LSU, LSU, LSU, LSU, FXa, FXa, FXb, FXb, Lat11, GroupAlone], - (instregex "STCKE$")>; -def : InstRW<[FXa, LSU, Lat5], (instregex "STFLE$")>; -def : InstRW<[FXb, Lat30], (instregex "SVC$")>; - -// Store real address -def : InstRW<[FXb, LSU, Lat5], (instregex "STRAG$")>; - //===----------------------------------------------------------------------===// // .insn directive instructions //===----------------------------------------------------------------------===// @@ -786,7 +843,7 @@ def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "FIXBR(A)?$")>; // Addition def : InstRW<[VecBF, LSU, Lat12], (instregex "A(E|D)B$")>; def : InstRW<[VecBF], (instregex "A(E|D)BR$")>; -def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "AXBR$")>; +def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXBR$")>; // Subtraction def : InstRW<[VecBF, LSU, Lat12], (instregex "S(E|D)B$")>; @@ -801,9 +858,9 @@ def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MXDBR$")>; def 
: InstRW<[VecDF2, VecDF2, Lat20, GroupAlone], (instregex "MXBR$")>; // Multiply and add / subtract -def : InstRW<[VecBF, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>; def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)EBR$")>; -def : InstRW<[VecBF, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>; def : InstRW<[VecBF], (instregex "M(A|S)DBR$")>; // Division @@ -811,14 +868,17 @@ def : InstRW<[VecFPd, LSU], (instregex "D(E|D)B$")>; def : InstRW<[VecFPd], (instregex "D(E|D)BR$")>; def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "DXBR$")>; +// Divide to integer +def : InstRW<[VecFPd, Lat30], (instregex "DI(E|D)BR$")>; + //===----------------------------------------------------------------------===// // FP: Comparisons //===----------------------------------------------------------------------===// // Compare -def : InstRW<[VecXsPm, LSU, Lat8], (instregex "C(E|D)B$")>; -def : InstRW<[VecXsPm, Lat4], (instregex "C(E|D)BR?$")>; -def : InstRW<[VecDF, VecDF, Lat20, GroupAlone], (instregex "CXBR$")>; +def : InstRW<[VecXsPm, LSU, Lat8], (instregex "(K|C)(E|D)B$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "(K|C)(E|D)BR?$")>; +def : InstRW<[VecDF, VecDF, Lat20, GroupAlone], (instregex "(K|C)XBR$")>; // Test Data Class def : InstRW<[LSU, VecXsPm, Lat9], (instregex "TC(E|D)B$")>; @@ -832,10 +892,246 @@ def : InstRW<[FXa, LSU, Lat4, GroupAlone], (instregex "EFPC$")>; def : InstRW<[FXb, LSU, Lat5, GroupAlone], (instregex "STFPC$")>; def : InstRW<[LSU, Lat3, GroupAlone], (instregex "SFPC$")>; def : InstRW<[LSU, LSU, Lat6, GroupAlone], (instregex "LFPC$")>; -def : InstRW<[FXa, Lat30, GroupAlone], (instregex "SFASR$")>; -def : InstRW<[FXa, LSU, Lat30, GroupAlone], (instregex "LFAS$")>; +def : InstRW<[FXa, Lat30], (instregex "SFASR$")>; +def : InstRW<[FXa, LSU, Lat30], (instregex "LFAS$")>; def : InstRW<[FXb, Lat3, GroupAlone], (instregex "SRNM(B|T)?$")>; + +// --------------------- Hexadecimal floating point ------------------------- // + +//===----------------------------------------------------------------------===// +// HFP: Move instructions +//===----------------------------------------------------------------------===// + +// Load and Test +def : InstRW<[VecXsPm, Lat4], (instregex "LT(D|E)R$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "LTXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[VecBF], (instregex "(LEDR|LRER)$")>; +def : InstRW<[VecBF], (instregex "LEXR$")>; +def : InstRW<[VecDF2], (instregex "(LDXR|LRDR)$")>; + +// Load lengthened +def : InstRW<[LSU], (instregex "LDE$")>; +def : InstRW<[FXb], (instregex "LDER$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "LX(D|E)$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "LX(D|E)R$")>; + +// Convert from fixed +def : InstRW<[FXb, VecBF, Lat9, BeginGroup], (instregex "CE(F|G)R$")>; +def : InstRW<[FXb, VecBF, Lat9, BeginGroup], (instregex "CD(F|G)R$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat12, GroupAlone], (instregex "CX(F|G)R$")>; + +// Convert to fixed +def : InstRW<[FXb, VecBF, Lat11, BeginGroup], (instregex "CF(E|D)R$")>; +def : InstRW<[FXb, VecBF, Lat11, BeginGroup], (instregex "CG(E|D)R$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat20, 
BeginGroup], (instregex "C(F|G)XR$")>; + +// Convert BFP to HFP / HFP to BFP. +def : InstRW<[VecBF], (instregex "THD(E)?R$")>; +def : InstRW<[VecBF], (instregex "TB(E)?DR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load Complement / Negative / Positive +def : InstRW<[VecXsPm, Lat4], (instregex "L(C|N|P)DR$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "L(C|N|P)ER$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "L(C|N|P)XR$")>; + +// Halve +def : InstRW<[VecBF], (instregex "H(E|D)R$")>; + +// Square root +def : InstRW<[VecFPd, LSU], (instregex "SQ(E|D)$")>; +def : InstRW<[VecFPd], (instregex "SQ(E|D)R$")>; +def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "SQXR$")>; + +// Load FP integer +def : InstRW<[VecBF], (instregex "FIER$")>; +def : InstRW<[VecBF], (instregex "FIDR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "FIXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : InstRW<[VecBF, LSU, Lat12], (instregex "A(E|D|U|W)$")>; +def : InstRW<[VecBF], (instregex "A(E|D|U|W)R$")>; +def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXR$")>; + +// Subtraction +def : InstRW<[VecBF, LSU, Lat12], (instregex "S(E|D|U|W)$")>; +def : InstRW<[VecBF], (instregex "S(E|D|U|W)R$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "SXR$")>; + +// Multiply +def : InstRW<[VecBF, LSU, Lat12], (instregex "M(D|DE|E|EE)$")>; +def : InstRW<[VecBF], (instregex "M(D|DE|E|EE)R$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MXD$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MXDR$")>; +def : InstRW<[VecDF2, VecDF2, Lat20, GroupAlone], (instregex "MXR$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MY$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "MY(H|L)$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MYR$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "MY(H|L)R$")>; + +// Multiply and add / subtract +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)E$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)ER$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)D$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)DR$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "MAY(H|L)$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MAY$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "MAY(H|L)R$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MAYR$")>; + +// Division +def : InstRW<[VecFPd, LSU], (instregex "D(E|D)$")>; +def : InstRW<[VecFPd], (instregex "D(E|D)R$")>; +def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "DXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[VecBF, LSU, Lat12], (instregex "C(E|D)$")>; +def : InstRW<[VecBF], (instregex "C(E|D)R$")>; +def : InstRW<[VecDF, VecDF, Lat20, GroupAlone], (instregex "CXR$")>; + + +// ------------------------ Decimal floating point -------------------------- // + 
+//===----------------------------------------------------------------------===// +// DFP: Move instructions +//===----------------------------------------------------------------------===// + +// Load and Test +def : InstRW<[VecDF], (instregex "LTDTR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "LTXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[VecDF, Lat15], (instregex "LEDTR$")>; +def : InstRW<[VecDF, VecDF, Lat20], (instregex "LDXTR$")>; + +// Load lengthened +def : InstRW<[VecDF], (instregex "LDETR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "LXDTR$")>; + +// Convert from fixed / logical +def : InstRW<[FXb, VecDF, Lat30, BeginGroup], (instregex "CD(F|G)TR(A)?$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat30, GroupAlone], (instregex "CX(F|G)TR(A)?$")>; +def : InstRW<[FXb, VecDF, Lat30, BeginGroup], (instregex "CDL(F|G)TR$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat30, GroupAlone], (instregex "CXL(F|G)TR$")>; + +// Convert to fixed / logical +def : InstRW<[FXb, VecDF, Lat30, BeginGroup], (instregex "C(F|G)DTR(A)?$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat30, BeginGroup], (instregex "C(F|G)XTR(A)?$")>; +def : InstRW<[FXb, VecDF, Lat30, BeginGroup], (instregex "CL(F|G)DTR$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat30, BeginGroup], (instregex "CL(F|G)XTR$")>; + +// Convert from / to signed / unsigned packed +def : InstRW<[FXb, VecDF, Lat9, BeginGroup], (instregex "CD(S|U)TR$")>; +def : InstRW<[FXb, FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "CX(S|U)TR$")>; +def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "C(S|U)DTR$")>; +def : InstRW<[FXb, FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "C(S|U)XTR$")>; + +// Convert from / to zoned +def : InstRW<[LSU, VecDF, Lat11, BeginGroup], (instregex "CDZT$")>; +def : InstRW<[LSU, LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "CXZT$")>; +def : InstRW<[FXb, LSU, VecDF, Lat11, BeginGroup], (instregex "CZDT$")>; +def : InstRW<[FXb, LSU, VecDF, VecDF, Lat15, GroupAlone], (instregex "CZXT$")>; + +// Convert from / to packed +def : InstRW<[LSU, VecDF, Lat11, BeginGroup], (instregex "CDPT$")>; +def : InstRW<[LSU, LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "CXPT$")>; +def : InstRW<[FXb, LSU, VecDF, Lat11, BeginGroup], (instregex "CPDT$")>; +def : InstRW<[FXb, LSU, VecDF, VecDF, Lat15, GroupAlone], (instregex "CPXT$")>; + +// Perform floating-point operation +def : InstRW<[FXb, Lat30], (instregex "PFPO$")>; + +//===----------------------------------------------------------------------===// +// DFP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load FP integer +def : InstRW<[VecDF], (instregex "FIDTR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "FIXTR$")>; + +// Extract biased exponent +def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "EEDTR$")>; +def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "EEXTR$")>; + +// Extract significance +def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "ESDTR$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat15, BeginGroup], (instregex "ESXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : 
InstRW<[VecDF], (instregex "ADTR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXTR(A)?$")>; + +// Subtraction +def : InstRW<[VecDF], (instregex "SDTR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "SXTR(A)?$")>; + +// Multiply +def : InstRW<[VecDF, Lat30], (instregex "MDTR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat30, GroupAlone], (instregex "MXTR(A)?$")>; + +// Division +def : InstRW<[VecDF, Lat30], (instregex "DDTR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat30, GroupAlone], (instregex "DXTR(A)?$")>; + +// Quantize +def : InstRW<[VecDF], (instregex "QADTR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "QAXTR$")>; + +// Reround +def : InstRW<[FXb, VecDF, Lat11, BeginGroup], (instregex "RRDTR$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "RRXTR$")>; + +// Shift significand left/right +def : InstRW<[LSU, VecDF, Lat11, GroupAlone], (instregex "S(L|R)DT$")>; +def : InstRW<[LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "S(L|R)XT$")>; + +// Insert biased exponent +def : InstRW<[FXb, VecDF, Lat11, BeginGroup], (instregex "IEDTR$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "IEXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[VecDF], (instregex "(K|C)DTR$")>; +def : InstRW<[VecDF, VecDF, Lat11, GroupAlone], (instregex "(K|C)XTR$")>; + +// Compare biased exponent +def : InstRW<[VecDF], (instregex "CEDTR$")>; +def : InstRW<[VecDF], (instregex "CEXTR$")>; + +// Test Data Class/Group +def : InstRW<[LSU, VecDF, Lat11], (instregex "TD(C|G)(E|D)T$")>; +def : InstRW<[LSU, VecDF, VecDF, Lat15, GroupAlone], (instregex "TD(C|G)XT$")>; + + // --------------------------------- Vector --------------------------------- // //===----------------------------------------------------------------------===// @@ -855,8 +1151,8 @@ def : InstRW<[VecXsPm], (instregex "VZERO$")>; def : InstRW<[VecXsPm], (instregex "VONE$")>; def : InstRW<[VecXsPm], (instregex "VGBM$")>; def : InstRW<[VecXsPm], (instregex "VGM(B|F|G|H)?$")>; -def : InstRW<[VecXsPm], (instregex "VLEI(B|F|G|H)$")>; def : InstRW<[VecXsPm], (instregex "VREPI(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VLEI(B|F|G|H)$")>; //===----------------------------------------------------------------------===// // Vector: Loads @@ -989,32 +1285,43 @@ def : InstRW<[VecStr, Lat5], (instregex "VTM$")>; // Vector: Floating-point arithmetic //===----------------------------------------------------------------------===// -def : InstRW<[VecBF2], (instregex "VCD(G|GB|LG|LGB)$")>; -def : InstRW<[VecBF], (instregex "WCD(GB|LGB)$")>; +// Conversion and rounding +def : InstRW<[VecBF2], (instregex "VCD(L)?G$")>; +def : InstRW<[VecBF2], (instregex "VCD(L)?GB$")>; +def : InstRW<[VecBF], (instregex "WCD(L)?GB$")>; def : InstRW<[VecBF2], (instregex "VC(L)?GD$")>; -def : InstRW<[VecBF2], (instregex "VFADB$")>; -def : InstRW<[VecBF], (instregex "WFADB$")>; -def : InstRW<[VecBF2], (instregex "VCGDB$")>; -def : InstRW<[VecBF], (instregex "WCGDB$")>; -def : InstRW<[VecBF2], (instregex "VF(I|M|A|S)$")>; -def : InstRW<[VecBF2], (instregex "VF(I|M|S)DB$")>; -def : InstRW<[VecBF], (instregex "WF(I|M|S)DB$")>; -def : InstRW<[VecBF2], (instregex "VCLGDB$")>; -def : InstRW<[VecBF], (instregex "WCLGDB$")>; -def : InstRW<[VecXsPm], (instregex "VFL(C|N|P)DB$")>; -def : InstRW<[VecXsPm], (instregex 
"WFL(C|N|P)DB$")>; -def : InstRW<[VecBF2], (instregex "VFM(A|S)$")>; -def : InstRW<[VecBF2], (instregex "VFM(A|S)DB$")>; -def : InstRW<[VecBF], (instregex "WFM(A|S)DB$")>; -def : InstRW<[VecXsPm], (instregex "VFPSO$")>; -def : InstRW<[VecXsPm], (instregex "(V|W)FPSODB$")>; -def : InstRW<[VecXsPm, Lat4], (instregex "VFTCI(DB)?$")>; -def : InstRW<[VecXsPm, Lat4], (instregex "WFTCIDB$")>; +def : InstRW<[VecBF2], (instregex "VC(L)?GDB$")>; +def : InstRW<[VecBF], (instregex "WC(L)?GDB$")>; def : InstRW<[VecBF2], (instregex "VL(DE|ED)$")>; def : InstRW<[VecBF2], (instregex "VL(DE|ED)B$")>; def : InstRW<[VecBF], (instregex "WL(DE|ED)B$")>; +def : InstRW<[VecBF2], (instregex "VFI$")>; +def : InstRW<[VecBF2], (instregex "VFIDB$")>; +def : InstRW<[VecBF], (instregex "WFIDB$")>; + +// Sign operations +def : InstRW<[VecXsPm], (instregex "VFPSO$")>; +def : InstRW<[VecXsPm], (instregex "(V|W)FPSODB$")>; +def : InstRW<[VecXsPm], (instregex "(V|W)FL(C|N|P)DB$")>; + +// Test data class +def : InstRW<[VecXsPm, Lat4], (instregex "VFTCI$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "(V|W)FTCIDB$")>; + +// Add / subtract +def : InstRW<[VecBF2], (instregex "VF(A|S)$")>; +def : InstRW<[VecBF2], (instregex "VF(A|S)DB$")>; +def : InstRW<[VecBF], (instregex "WF(A|S)DB$")>; + +// Multiply / multiply-and-add/subtract +def : InstRW<[VecBF2], (instregex "VFM$")>; +def : InstRW<[VecBF2], (instregex "VFMDB$")>; +def : InstRW<[VecBF], (instregex "WFMDB$")>; +def : InstRW<[VecBF2], (instregex "VFM(A|S)$")>; +def : InstRW<[VecBF2], (instregex "VFM(A|S)DB$")>; +def : InstRW<[VecBF], (instregex "WFM(A|S)DB$")>; -// divide / square root +// Divide / square root def : InstRW<[VecFPd], (instregex "VFD$")>; def : InstRW<[VecFPd], (instregex "(V|W)FDDB$")>; def : InstRW<[VecFPd], (instregex "VFSQ$")>; @@ -1026,10 +1333,10 @@ def : InstRW<[VecFPd], (instregex "(V|W)FSQDB$")>; def : InstRW<[VecXsPm], (instregex "VFC(E|H|HE)$")>; def : InstRW<[VecXsPm], (instregex "VFC(E|H|HE)DB$")>; -def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)$")>; def : InstRW<[VecXsPm], (instregex "WFC(E|H|HE)DB$")>; def : InstRW<[VecXsPm, Lat4], (instregex "VFC(E|H|HE)DBS$")>; def : InstRW<[VecXsPm, Lat4], (instregex "WFC(E|H|HE)DBS$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)$")>; def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)DB$")>; //===----------------------------------------------------------------------===// @@ -1060,5 +1367,163 @@ def : InstRW<[VecStr, Lat5], (instregex "VSTRC(B|F|H)S$")>; def : InstRW<[VecStr], (instregex "VSTRCZ(B|F|H)$")>; def : InstRW<[VecStr, Lat5], (instregex "VSTRCZ(B|F|H)S$")>; + +// -------------------------------- System ---------------------------------- // + +//===----------------------------------------------------------------------===// +// System: Program-Status Word Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "EPSW$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "LPSW(E)?$")>; +def : InstRW<[FXa, Lat3, GroupAlone], (instregex "IPK$")>; +def : InstRW<[LSU, EndGroup], (instregex "SPKA$")>; +def : InstRW<[LSU, EndGroup], (instregex "SSM$")>; +def : InstRW<[FXb, LSU, GroupAlone], (instregex "ST(N|O)SM$")>; +def : InstRW<[FXa, Lat3], (instregex "IAC$")>; +def : InstRW<[LSU, EndGroup], (instregex "SAC(F)?$")>; + +//===----------------------------------------------------------------------===// +// System: Control Register Instructions +//===----------------------------------------------------------------------===// + 
+def : InstRW<[FXb, LSU, Lat30], (instregex "LCTL(G)?$")>; +def : InstRW<[LSU, Lat30], (instregex "STCT(L|G)$")>; +def : InstRW<[LSU], (instregex "E(P|S)A(I)?R$")>; +def : InstRW<[FXb, Lat30], (instregex "SSA(I)?R$")>; +def : InstRW<[FXb, Lat30], (instregex "ESEA$")>; + +//===----------------------------------------------------------------------===// +// System: Prefix-Register Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "SPX$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STPX$")>; + +//===----------------------------------------------------------------------===// +// System: Storage-Key and Real Memory Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "ISKE$")>; +def : InstRW<[FXb, Lat30], (instregex "IVSK$")>; +def : InstRW<[FXb, Lat30], (instregex "SSKE(Opt)?$")>; +def : InstRW<[FXb, Lat30], (instregex "RRB(E|M)$")>; +def : InstRW<[FXb, Lat30], (instregex "PFMF$")>; +def : InstRW<[FXb, Lat30], (instregex "TB$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "PGIN$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "PGOUT$")>; + +//===----------------------------------------------------------------------===// +// System: Dynamic-Address-Translation Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "IPTE(Opt)?(Opt)?$")>; +def : InstRW<[FXb, Lat30], (instregex "IDTE(Opt)?$")>; +def : InstRW<[FXb, Lat30], (instregex "CRDTE(Opt)?$")>; +def : InstRW<[FXb, Lat30], (instregex "PTLB$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "CSP(G)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "LPTEA$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "LRA(Y|G)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STRAG$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "LURA(G)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STUR(A|G)$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "TPROT$")>; + +//===----------------------------------------------------------------------===// +// System: Memory-move Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, FXa, FXb, LSU, Lat8, GroupAlone], (instregex "MVC(K|P|S)$")>; +def : InstRW<[FXa, LSU, Lat6, GroupAlone], (instregex "MVC(S|D)K$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "MVCOS$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVPG$")>; + +//===----------------------------------------------------------------------===// +// System: Address-Space Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "LASP$")>; +def : InstRW<[LSU, GroupAlone], (instregex "PALB$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "PC$")>; +def : InstRW<[FXb, Lat30], (instregex "PR$")>; +def : InstRW<[FXb, Lat30], (instregex "PT(I)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "RP$")>; +def : InstRW<[FXb, Lat30], (instregex "BS(G|A)$")>; +def : InstRW<[FXb, Lat20], (instregex "TAR$")>; + +//===----------------------------------------------------------------------===// +// System: Linkage-Stack Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30, EndGroup], (instregex "BAKR$")>; +def : InstRW<[FXb, Lat30], (instregex "EREG(G)?$")>; +def : InstRW<[FXb, Lat30], (instregex 
"(E|M)STA$")>; + +//===----------------------------------------------------------------------===// +// System: Time-Related Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "PTFF$")>; +def : InstRW<[FXb, LSU, Lat20], (instregex "SCK$")>; +def : InstRW<[FXb, Lat30], (instregex "SCKPF$")>; +def : InstRW<[FXb, LSU, Lat20], (instregex "SCKC$")>; +def : InstRW<[LSU, LSU, GroupAlone], (instregex "SPT$")>; +def : InstRW<[LSU, LSU, LSU, FXa, FXa, FXb, Lat9, GroupAlone], + (instregex "STCK(F)?$")>; +def : InstRW<[LSU, LSU, LSU, LSU, FXa, FXa, FXb, FXb, Lat11, GroupAlone], + (instregex "STCKE$")>; +def : InstRW<[FXb, LSU, Lat9], (instregex "STCKC$")>; +def : InstRW<[LSU, LSU, FXb, Lat5, BeginGroup], (instregex "STPT$")>; + +//===----------------------------------------------------------------------===// +// System: CPU-Related Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "STAP$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STIDP$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STSI$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STFL(E)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "ECAG$")>; +def : InstRW<[FXa, LSU, Lat30], (instregex "ECTG$")>; +def : InstRW<[FXb, Lat30], (instregex "PTF$")>; +def : InstRW<[FXb, Lat30], (instregex "PCKMO$")>; + +//===----------------------------------------------------------------------===// +// System: Miscellaneous Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "SVC$")>; +def : InstRW<[FXb, GroupAlone], (instregex "MC$")>; +def : InstRW<[FXb, Lat30], (instregex "DIAG$")>; +def : InstRW<[FXb], (instregex "TRAC(E|G)$")>; +def : InstRW<[FXb, Lat30], (instregex "TRAP(2|4)$")>; +def : InstRW<[FXb, Lat30], (instregex "SIGP$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "SIGA$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "SIE$")>; + +//===----------------------------------------------------------------------===// +// System: CPU-Measurement Facility Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb], (instregex "LPP$")>; +def : InstRW<[FXb, Lat30], (instregex "ECPGA$")>; +def : InstRW<[FXb, Lat30], (instregex "E(C|P)CTR$")>; +def : InstRW<[FXb, Lat30], (instregex "LCCTL$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "L(P|S)CTL$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "Q(S|CTR)I$")>; +def : InstRW<[FXb, Lat30], (instregex "S(C|P)CTR$")>; + +//===----------------------------------------------------------------------===// +// System: I/O Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "(C|H|R|X)SCH$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "(M|S|ST|T)SCH$")>; +def : InstRW<[FXb, Lat30], (instregex "RCHP$")>; +def : InstRW<[FXb, Lat30], (instregex "SCHM$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STC(PS|RW)$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "TPI$")>; +def : InstRW<[FXb, Lat30], (instregex "SAL$")>; + } diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ14.td b/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ14.td new file mode 100644 index 0000000..698eb56 --- /dev/null +++ b/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ14.td @@ -0,0 +1,1611 @@ +//-- SystemZScheduleZ14.td - SystemZ 
Scheduling Definitions ----*- tblgen -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the machine model for Z14 to support instruction +// scheduling and other instruction cost heuristics. +// +//===----------------------------------------------------------------------===// + +def Z14Model : SchedMachineModel { + + let UnsupportedFeatures = Arch12UnsupportedFeatures.List; + + let IssueWidth = 8; + let MicroOpBufferSize = 60; // Issue queues + let LoadLatency = 1; // Optimistic load latency. + + let PostRAScheduler = 1; + + // Extra cycles for a mispredicted branch. + let MispredictPenalty = 20; +} + +let SchedModel = Z14Model in { + +// These definitions could be put in a subtarget common include file, +// but it seems the include system in Tablegen currently rejects +// multiple includes of same file. +def : WriteRes<GroupAlone, []> { + let NumMicroOps = 0; + let BeginGroup = 1; + let EndGroup = 1; +} +def : WriteRes<BeginGroup, []> { + let NumMicroOps = 0; + let BeginGroup = 1; +} +def : WriteRes<EndGroup, []> { + let NumMicroOps = 0; + let EndGroup = 1; +} +def : WriteRes<Lat2, []> { let Latency = 2; let NumMicroOps = 0;} +def : WriteRes<Lat3, []> { let Latency = 3; let NumMicroOps = 0;} +def : WriteRes<Lat4, []> { let Latency = 4; let NumMicroOps = 0;} +def : WriteRes<Lat5, []> { let Latency = 5; let NumMicroOps = 0;} +def : WriteRes<Lat6, []> { let Latency = 6; let NumMicroOps = 0;} +def : WriteRes<Lat7, []> { let Latency = 7; let NumMicroOps = 0;} +def : WriteRes<Lat8, []> { let Latency = 8; let NumMicroOps = 0;} +def : WriteRes<Lat9, []> { let Latency = 9; let NumMicroOps = 0;} +def : WriteRes<Lat10, []> { let Latency = 10; let NumMicroOps = 0;} +def : WriteRes<Lat11, []> { let Latency = 11; let NumMicroOps = 0;} +def : WriteRes<Lat12, []> { let Latency = 12; let NumMicroOps = 0;} +def : WriteRes<Lat15, []> { let Latency = 15; let NumMicroOps = 0;} +def : WriteRes<Lat20, []> { let Latency = 20; let NumMicroOps = 0;} +def : WriteRes<Lat30, []> { let Latency = 30; let NumMicroOps = 0;} + +// Execution units. +def Z14_FXaUnit : ProcResource<2>; +def Z14_FXbUnit : ProcResource<2>; +def Z14_LSUnit : ProcResource<2>; +def Z14_VecUnit : ProcResource<2>; +def Z14_VecFPdUnit : ProcResource<2> { let BufferSize = 1; /* blocking */ } +def Z14_VBUnit : ProcResource<2>; + +// Subtarget specific definitions of scheduling resources. 
+def : WriteRes<FXa, [Z14_FXaUnit]> { let Latency = 1; } +def : WriteRes<FXa2, [Z14_FXaUnit, Z14_FXaUnit]> { let Latency = 2; } +def : WriteRes<FXb, [Z14_FXbUnit]> { let Latency = 1; } +def : WriteRes<LSU, [Z14_LSUnit]> { let Latency = 4; } +def : WriteRes<VecBF, [Z14_VecUnit]> { let Latency = 8; } +def : WriteRes<VecBF2, [Z14_VecUnit, Z14_VecUnit]> { let Latency = 9; } +def : WriteRes<VecDF, [Z14_VecUnit]> { let Latency = 8; } +def : WriteRes<VecDF2, [Z14_VecUnit, Z14_VecUnit]> { let Latency = 9; } +def : WriteRes<VecDFX, [Z14_VecUnit]> { let Latency = 1; } +def : WriteRes<VecDFX2, [Z14_VecUnit, Z14_VecUnit]> { let Latency = 2; } +def : WriteRes<VecFPd, [Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit, + Z14_VecFPdUnit, Z14_VecFPdUnit, Z14_VecFPdUnit]> + { let Latency = 30; } +def : WriteRes<VecMul, [Z14_VecUnit]> { let Latency = 5; } +def : WriteRes<VecStr, [Z14_VecUnit]> { let Latency = 4; } +def : WriteRes<VecXsPm, [Z14_VecUnit]> { let Latency = 3; } +def : WriteRes<VBU, [Z14_VBUnit]>; // Virtual Branching Unit + +// -------------------------- INSTRUCTIONS ---------------------------------- // + +// InstRW constructs have been used in order to preserve the +// readability of the InstrInfo files. + +// For each instruction, as matched by a regexp, provide a list of +// resources that it needs. These will be combined into a SchedClass. + +//===----------------------------------------------------------------------===// +// Stack allocation +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa], (instregex "ADJDYNALLOC$")>; // Pseudo -> LA / LAY + +//===----------------------------------------------------------------------===// +// Branch instructions +//===----------------------------------------------------------------------===// + +// Branch +def : InstRW<[VBU], (instregex "(Call)?BRC(L)?(Asm.*)?$")>; +def : InstRW<[VBU], (instregex "(Call)?J(G)?(Asm.*)?$")>; +def : InstRW<[FXb], (instregex "(Call)?BC(R)?(Asm.*)?$")>; +def : InstRW<[FXb], (instregex "(Call)?B(R)?(Asm.*)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "BI(C)?(Asm.*)?$")>; +def : InstRW<[FXa, EndGroup], (instregex "BRCT(G)?$")>; +def : InstRW<[FXb, FXa, Lat2, GroupAlone], (instregex "BRCTH$")>; +def : InstRW<[FXb, FXa, Lat2, GroupAlone], (instregex "BCT(G)?(R)?$")>; +def : InstRW<[FXa, FXa, FXb, FXb, Lat4, GroupAlone], + (instregex "B(R)?X(H|L).*$")>; + +// Compare and branch +def : InstRW<[FXb], (instregex "C(L)?(G)?(I|R)J(Asm.*)?$")>; +def : InstRW<[FXb, FXb, Lat2, GroupAlone], + (instregex "C(L)?(G)?(I|R)B(Call|Return|Asm.*)?$")>; + +//===----------------------------------------------------------------------===// +// Trap instructions +//===----------------------------------------------------------------------===// + +// Trap +def : InstRW<[VBU], (instregex "(Cond)?Trap$")>; + +// Compare and trap +def : InstRW<[FXb], (instregex "C(G)?(I|R)T(Asm.*)?$")>; +def : InstRW<[FXb], (instregex "CL(G)?RT(Asm.*)?$")>; +def : InstRW<[FXb], (instregex "CL(F|G)IT(Asm.*)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CL(G)?T(Asm.*)?$")>; + 
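The reading key for everything that follows is the comment earlier in this file: each def : InstRW<[...], (instregex "...")> line ties a regular expression over opcode names to the list of scheduling writes those opcodes consume, and TableGen folds each pairing into a SchedClass. The LatN, BeginGroup, EndGroup and GroupAlone entries defined above are zero-micro-op pseudo-writes: they add no work to any execution unit and only adjust the modeled latency or decode-group placement. As an illustrative sketch only (the mnemonic FOO is made up and not part of this patch), a load-and-execute instruction would be written in the same convention as:

// Hypothetical example mirroring the conventions of this file: FOO reads
// memory (LSU, latency 4), executes on the FXa pipe (latency 1), has an
// overall result latency of 5 cycles, and must end its decode group.
def : InstRW<[FXa, LSU, Lat5, EndGroup], (instregex "FOO$")>;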
+//===----------------------------------------------------------------------===// +// Call and return instructions +//===----------------------------------------------------------------------===// + +// Call +def : InstRW<[VBU, FXa, FXa, Lat3, GroupAlone], (instregex "(Call)?BRAS$")>; +def : InstRW<[FXa, FXa, FXb, Lat3, GroupAlone], (instregex "(Call)?BRASL$")>; +def : InstRW<[FXa, FXa, FXb, Lat3, GroupAlone], (instregex "(Call)?BAS(R)?$")>; +def : InstRW<[FXa, FXa, FXb, Lat3, GroupAlone], (instregex "TLS_(G|L)DCALL$")>; + +// Return +def : InstRW<[FXb, EndGroup], (instregex "Return$")>; +def : InstRW<[FXb], (instregex "CondReturn$")>; + +//===----------------------------------------------------------------------===// +// Select instructions +//===----------------------------------------------------------------------===// + +// Select pseudo +def : InstRW<[FXa], (instregex "Select(32|64|32Mux)$")>; + +// CondStore pseudos +def : InstRW<[FXa], (instregex "CondStore16(Inv)?$")>; +def : InstRW<[FXa], (instregex "CondStore16Mux(Inv)?$")>; +def : InstRW<[FXa], (instregex "CondStore32(Inv)?$")>; +def : InstRW<[FXa], (instregex "CondStore32Mux(Inv)?$")>; +def : InstRW<[FXa], (instregex "CondStore64(Inv)?$")>; +def : InstRW<[FXa], (instregex "CondStore8(Inv)?$")>; +def : InstRW<[FXa], (instregex "CondStore8Mux(Inv)?$")>; + +//===----------------------------------------------------------------------===// +// Move instructions +//===----------------------------------------------------------------------===// + +// Moves +def : InstRW<[FXb, LSU, Lat5], (instregex "MV(G|H)?HI$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "MVI(Y)?$")>; + +// Move character +def : InstRW<[FXb, LSU, LSU, LSU, Lat8, GroupAlone], (instregex "MVC$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCL(E|U)?$")>; + +// Pseudo -> reg move +def : InstRW<[FXa], (instregex "COPY(_TO_REGCLASS)?$")>; +def : InstRW<[FXa], (instregex "EXTRACT_SUBREG$")>; +def : InstRW<[FXa], (instregex "INSERT_SUBREG$")>; +def : InstRW<[FXa], (instregex "REG_SEQUENCE$")>; +def : InstRW<[FXa], (instregex "SUBREG_TO_REG$")>; + +// Loads +def : InstRW<[LSU], (instregex "L(Y|FH|RL|Mux|CBB)?$")>; +def : InstRW<[LSU], (instregex "LG(RL)?$")>; +def : InstRW<[LSU], (instregex "L128$")>; + +def : InstRW<[FXa], (instregex "LLIH(F|H|L)$")>; +def : InstRW<[FXa], (instregex "LLIL(F|H|L)$")>; + +def : InstRW<[FXa], (instregex "LG(F|H)I$")>; +def : InstRW<[FXa], (instregex "LHI(Mux)?$")>; +def : InstRW<[FXa], (instregex "LR(Mux)?$")>; + +// Load and zero rightmost byte +def : InstRW<[LSU], (instregex "LZR(F|G)$")>; + +// Load and trap +def : InstRW<[FXb, LSU, Lat5], (instregex "L(FH|G)?AT$")>; + +// Load and test +def : InstRW<[FXa, LSU, Lat5], (instregex "LT(G)?$")>; +def : InstRW<[FXa], (instregex "LT(G)?R$")>; + +// Stores +def : InstRW<[FXb, LSU, Lat5], (instregex "STG(RL)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "ST128$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "ST(Y|FH|RL|Mux)?$")>; + +// String moves. 
+def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVST$")>; + +//===----------------------------------------------------------------------===// +// Conditional move instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, Lat2], (instregex "LOCRMux$")>; +def : InstRW<[FXa, Lat2], (instregex "LOC(G|FH)?R(Asm.*)?$")>; +def : InstRW<[FXa, Lat2], (instregex "LOC(G|H)?HI(Mux|(Asm.*))?$")>; +def : InstRW<[FXa, LSU, Lat6], (instregex "LOC(G|FH|Mux)?(Asm.*)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "STOC(G|FH|Mux)?(Asm.*)?$")>; + +//===----------------------------------------------------------------------===// +// Sign extensions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa], (instregex "L(B|H|G)R$")>; +def : InstRW<[FXa], (instregex "LG(B|H|F)R$")>; + +def : InstRW<[FXa, LSU, Lat5], (instregex "LTGF$")>; +def : InstRW<[FXa], (instregex "LTGFR$")>; + +def : InstRW<[FXa, LSU, Lat5], (instregex "LB(H|Mux)?$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "LH(Y)?$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "LH(H|Mux|RL)$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "LG(B|H|F)$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "LG(H|F)RL$")>; + +//===----------------------------------------------------------------------===// +// Zero extensions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa], (instregex "LLCR(Mux)?$")>; +def : InstRW<[FXa], (instregex "LLHR(Mux)?$")>; +def : InstRW<[FXa], (instregex "LLG(C|H|F|T)R$")>; +def : InstRW<[LSU], (instregex "LLC(Mux)?$")>; +def : InstRW<[LSU], (instregex "LLH(Mux)?$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "LL(C|H)H$")>; +def : InstRW<[LSU], (instregex "LLHRL$")>; +def : InstRW<[LSU], (instregex "LLG(C|H|F|T|HRL|FRL)$")>; + +// Load and zero rightmost byte +def : InstRW<[LSU], (instregex "LLZRGF$")>; + +// Load and trap +def : InstRW<[FXb, LSU, Lat5], (instregex "LLG(F|T)?AT$")>; + +//===----------------------------------------------------------------------===// +// Truncations +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat5], (instregex "STC(H|Y|Mux)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "STH(H|Y|RL|Mux)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "STCM(H|Y)?$")>; + +//===----------------------------------------------------------------------===// +// Multi-register moves +//===----------------------------------------------------------------------===// + +// Load multiple (estimated average of 5 ops) +def : InstRW<[LSU, LSU, LSU, LSU, LSU, Lat10, GroupAlone], + (instregex "LM(H|Y|G)?$")>; + +// Load multiple disjoint +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "LMD$")>; + +// Store multiple (estimated average of ceil(5/2) FXb ops) +def : InstRW<[LSU, LSU, FXb, FXb, FXb, Lat10, + GroupAlone], (instregex "STM(G|H|Y)?$")>; + +//===----------------------------------------------------------------------===// +// Byte swaps +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa], (instregex "LRV(G)?R$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "LRV(G|H)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "STRV(G|H)?$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCIN$")>; + +//===----------------------------------------------------------------------===// +// Load address instructions 
+//===----------------------------------------------------------------------===// + +def : InstRW<[FXa], (instregex "LA(Y|RL)?$")>; + +// Load the Global Offset Table address ( -> larl ) +def : InstRW<[FXa], (instregex "GOT$")>; + +//===----------------------------------------------------------------------===// +// Absolute and Negation +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa], (instregex "LP(G)?R$")>; +def : InstRW<[FXa, FXa, Lat2, BeginGroup], (instregex "L(N|P)GFR$")>; +def : InstRW<[FXa], (instregex "LN(R|GR)$")>; +def : InstRW<[FXa], (instregex "LC(R|GR)$")>; +def : InstRW<[FXa, FXa, Lat2, BeginGroup], (instregex "LCGFR$")>; + +//===----------------------------------------------------------------------===// +// Insertion +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, LSU, Lat5], (instregex "IC(Y)?$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "IC32(Y)?$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "ICM(H|Y)?$")>; +def : InstRW<[FXa], (instregex "II(F|H|L)Mux$")>; +def : InstRW<[FXa], (instregex "IIHF(64)?$")>; +def : InstRW<[FXa], (instregex "IIHH(64)?$")>; +def : InstRW<[FXa], (instregex "IIHL(64)?$")>; +def : InstRW<[FXa], (instregex "IILF(64)?$")>; +def : InstRW<[FXa], (instregex "IILH(64)?$")>; +def : InstRW<[FXa], (instregex "IILL(64)?$")>; + +//===----------------------------------------------------------------------===// +// Addition +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, LSU, Lat5], (instregex "A(Y)?$")>; +def : InstRW<[FXa, LSU, Lat6], (instregex "AH(Y)?$")>; +def : InstRW<[FXa], (instregex "AIH$")>; +def : InstRW<[FXa], (instregex "AFI(Mux)?$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "AG$")>; +def : InstRW<[FXa], (instregex "AGFI$")>; +def : InstRW<[FXa], (instregex "AGHI(K)?$")>; +def : InstRW<[FXa], (instregex "AGR(K)?$")>; +def : InstRW<[FXa], (instregex "AHI(K)?$")>; +def : InstRW<[FXa], (instregex "AHIMux(K)?$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "AL(Y)?$")>; +def : InstRW<[FXa], (instregex "AL(FI|HSIK)$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "ALG(F)?$")>; +def : InstRW<[FXa], (instregex "ALGHSIK$")>; +def : InstRW<[FXa], (instregex "ALGF(I|R)$")>; +def : InstRW<[FXa], (instregex "ALGR(K)?$")>; +def : InstRW<[FXa], (instregex "ALR(K)?$")>; +def : InstRW<[FXa], (instregex "AR(K)?$")>; +def : InstRW<[FXa], (instregex "A(L)?HHHR$")>; +def : InstRW<[FXa, Lat2], (instregex "A(L)?HHLR$")>; +def : InstRW<[FXa], (instregex "ALSIH(N)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "A(L)?(G)?SI$")>; + +// Logical addition with carry +def : InstRW<[FXa, LSU, Lat6, GroupAlone], (instregex "ALC(G)?$")>; +def : InstRW<[FXa, Lat2, GroupAlone], (instregex "ALC(G)?R$")>; + +// Add with sign extension (16/32 -> 64) +def : InstRW<[FXa, LSU, Lat6], (instregex "AG(F|H)$")>; +def : InstRW<[FXa, Lat2], (instregex "AGFR$")>; + +//===----------------------------------------------------------------------===// +// Subtraction +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, LSU, Lat5], (instregex "S(G|Y)?$")>; +def : InstRW<[FXa, LSU, Lat6], (instregex "SH(Y)?$")>; +def : InstRW<[FXa], (instregex "SGR(K)?$")>; +def : InstRW<[FXa], (instregex "SLFI$")>; +def : InstRW<[FXa, LSU, Lat5], (instregex "SL(G|GF|Y)?$")>; +def : InstRW<[FXa], (instregex "SLGF(I|R)$")>; +def : InstRW<[FXa], (instregex "SLGR(K)?$")>; +def : InstRW<[FXa], 
(instregex "SLR(K)?$")>; +def : InstRW<[FXa], (instregex "SR(K)?$")>; +def : InstRW<[FXa], (instregex "S(L)?HHHR$")>; +def : InstRW<[FXa, Lat2], (instregex "S(L)?HHLR$")>; + +// Subtraction with borrow +def : InstRW<[FXa, LSU, Lat6, GroupAlone], (instregex "SLB(G)?$")>; +def : InstRW<[FXa, Lat2, GroupAlone], (instregex "SLB(G)?R$")>; + +// Subtraction with sign extension (16/32 -> 64) +def : InstRW<[FXa, LSU, Lat6], (instregex "SG(F|H)$")>; +def : InstRW<[FXa, Lat2], (instregex "SGFR$")>; + +//===----------------------------------------------------------------------===// +// AND +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, LSU, Lat5], (instregex "N(G|Y)?$")>; +def : InstRW<[FXa], (instregex "NGR(K)?$")>; +def : InstRW<[FXa], (instregex "NI(FMux|HMux|LMux)$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "NI(Y)?$")>; +def : InstRW<[FXa], (instregex "NIHF(64)?$")>; +def : InstRW<[FXa], (instregex "NIHH(64)?$")>; +def : InstRW<[FXa], (instregex "NIHL(64)?$")>; +def : InstRW<[FXa], (instregex "NILF(64)?$")>; +def : InstRW<[FXa], (instregex "NILH(64)?$")>; +def : InstRW<[FXa], (instregex "NILL(64)?$")>; +def : InstRW<[FXa], (instregex "NR(K)?$")>; +def : InstRW<[LSU, LSU, FXb, Lat9, BeginGroup], (instregex "NC$")>; + +//===----------------------------------------------------------------------===// +// OR +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, LSU, Lat5], (instregex "O(G|Y)?$")>; +def : InstRW<[FXa], (instregex "OGR(K)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "OI(Y)?$")>; +def : InstRW<[FXa], (instregex "OI(FMux|HMux|LMux)$")>; +def : InstRW<[FXa], (instregex "OIHF(64)?$")>; +def : InstRW<[FXa], (instregex "OIHH(64)?$")>; +def : InstRW<[FXa], (instregex "OIHL(64)?$")>; +def : InstRW<[FXa], (instregex "OILF(64)?$")>; +def : InstRW<[FXa], (instregex "OILH(64)?$")>; +def : InstRW<[FXa], (instregex "OILL(64)?$")>; +def : InstRW<[FXa], (instregex "OR(K)?$")>; +def : InstRW<[LSU, LSU, FXb, Lat9, BeginGroup], (instregex "OC$")>; + +//===----------------------------------------------------------------------===// +// XOR +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, LSU, Lat5], (instregex "X(G|Y)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "XI(Y)?$")>; +def : InstRW<[FXa], (instregex "XIFMux$")>; +def : InstRW<[FXa], (instregex "XGR(K)?$")>; +def : InstRW<[FXa], (instregex "XIHF(64)?$")>; +def : InstRW<[FXa], (instregex "XILF(64)?$")>; +def : InstRW<[FXa], (instregex "XR(K)?$")>; +def : InstRW<[LSU, LSU, FXb, Lat9, BeginGroup], (instregex "XC$")>; + +//===----------------------------------------------------------------------===// +// Multiplication +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, LSU, Lat9], (instregex "MS(GF|Y)?$")>; +def : InstRW<[FXa, Lat5], (instregex "MS(R|FI)$")>; +def : InstRW<[FXa, LSU, Lat11], (instregex "MSG$")>; +def : InstRW<[FXa, Lat7], (instregex "MSGR$")>; +def : InstRW<[FXa, Lat5], (instregex "MSGF(I|R)$")>; +def : InstRW<[FXa2, LSU, Lat12, GroupAlone], (instregex "MLG$")>; +def : InstRW<[FXa2, Lat8, GroupAlone], (instregex "MLGR$")>; +def : InstRW<[FXa, Lat4], (instregex "MGHI$")>; +def : InstRW<[FXa, Lat4], (instregex "MHI$")>; +def : InstRW<[FXa, LSU, Lat8], (instregex "MH(Y)?$")>; +def : InstRW<[FXa2, Lat6, GroupAlone], (instregex "M(L)?R$")>; +def : InstRW<[FXa2, LSU, Lat10, GroupAlone], (instregex "M(FY|L)?$")>; +def : 
InstRW<[FXa, LSU, Lat8], (instregex "MGH$")>; +def : InstRW<[FXa, FXa, LSU, Lat12, GroupAlone], (instregex "MG$")>; +def : InstRW<[FXa, FXa, Lat8, GroupAlone], (instregex "MGRK$")>; +def : InstRW<[FXa, LSU, Lat9], (instregex "MSC$")>; +def : InstRW<[FXa, LSU, Lat11], (instregex "MSGC$")>; +def : InstRW<[FXa, Lat5], (instregex "MSRKC$")>; +def : InstRW<[FXa, Lat7], (instregex "MSGRKC$")>; + +//===----------------------------------------------------------------------===// +// Division and remainder +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa2, FXa2, Lat20, GroupAlone], (instregex "DR$")>; +def : InstRW<[FXa2, FXa2, LSU, Lat30, GroupAlone], (instregex "D$")>; +def : InstRW<[FXa2, Lat30, GroupAlone], (instregex "DSG(F)?R$")>; +def : InstRW<[LSU, FXa2, Lat30, GroupAlone], (instregex "DSG(F)?$")>; +def : InstRW<[FXa2, FXa2, Lat20, GroupAlone], (instregex "DLR$")>; +def : InstRW<[FXa2, FXa2, Lat30, GroupAlone], (instregex "DLGR$")>; +def : InstRW<[FXa2, FXa2, LSU, Lat30, GroupAlone], (instregex "DL(G)?$")>; + +//===----------------------------------------------------------------------===// +// Shifts +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa], (instregex "SLL(G|K)?$")>; +def : InstRW<[FXa], (instregex "SRL(G|K)?$")>; +def : InstRW<[FXa], (instregex "SRA(G|K)?$")>; +def : InstRW<[FXa], (instregex "SLA(G|K)?$")>; +def : InstRW<[FXa, FXa, FXa, FXa, LSU, Lat8, GroupAlone], + (instregex "S(L|R)D(A|L)$")>; + +// Rotate +def : InstRW<[FXa, LSU, Lat6], (instregex "RLL(G)?$")>; + +// Rotate and insert +def : InstRW<[FXa], (instregex "RISBG(N|32)?$")>; +def : InstRW<[FXa], (instregex "RISBH(G|H|L)$")>; +def : InstRW<[FXa], (instregex "RISBL(G|H|L)$")>; +def : InstRW<[FXa], (instregex "RISBMux$")>; + +// Rotate and Select +def : InstRW<[FXa, FXa, Lat2, BeginGroup], (instregex "R(N|O|X)SBG$")>; + +//===----------------------------------------------------------------------===// +// Comparison +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat5], (instregex "C(G|Y|Mux|RL)?$")>; +def : InstRW<[FXb], (instregex "C(F|H)I(Mux)?$")>; +def : InstRW<[FXb], (instregex "CG(F|H)I$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CG(HSI|RL)$")>; +def : InstRW<[FXb], (instregex "C(G)?R$")>; +def : InstRW<[FXb], (instregex "CIH$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CH(F|SI)$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CL(Y|Mux|FHSI)?$")>; +def : InstRW<[FXb], (instregex "CLFI(Mux)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CLG(HRL|HSI)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CLGF(RL)?$")>; +def : InstRW<[FXb], (instregex "CLGF(I|R)$")>; +def : InstRW<[FXb], (instregex "CLGR$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CLGRL$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CLH(F|RL|HSI)$")>; +def : InstRW<[FXb], (instregex "CLIH$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CLI(Y)?$")>; +def : InstRW<[FXb], (instregex "CLR$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "CLRL$")>; +def : InstRW<[FXb], (instregex "C(L)?HHR$")>; +def : InstRW<[FXb, Lat2], (instregex "C(L)?HLR$")>; + +// Compare halfword +def : InstRW<[FXb, LSU, Lat6], (instregex "CH(Y|RL)?$")>; +def : InstRW<[FXb, LSU, Lat6], (instregex "CGH(RL)?$")>; +def : InstRW<[FXa, FXb, LSU, Lat6, BeginGroup], (instregex "CHHSI$")>; + +// Compare with sign extension (32 -> 64) +def : InstRW<[FXb, LSU, Lat6], (instregex "CGF(RL)?$")>; +def : InstRW<[FXb, 
Lat2], (instregex "CGFR$")>; + +// Compare logical character +def : InstRW<[FXb, LSU, LSU, Lat9, BeginGroup], (instregex "CLC$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CLCL(E|U)?$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CLST$")>; + +// Test under mask +def : InstRW<[FXb, LSU, Lat5], (instregex "TM(Y)?$")>; +def : InstRW<[FXb], (instregex "TM(H|L)Mux$")>; +def : InstRW<[FXb], (instregex "TMHH(64)?$")>; +def : InstRW<[FXb], (instregex "TMHL(64)?$")>; +def : InstRW<[FXb], (instregex "TMLH(64)?$")>; +def : InstRW<[FXb], (instregex "TMLL(64)?$")>; + +// Compare logical characters under mask +def : InstRW<[FXb, LSU, Lat6], (instregex "CLM(H|Y)?$")>; + +//===----------------------------------------------------------------------===// +// Prefetch and execution hint +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU], (instregex "PFD(RL)?$")>; +def : InstRW<[FXb, Lat2], (instregex "BPP$")>; +def : InstRW<[FXb, EndGroup], (instregex "BPRP$")>; +def : InstRW<[FXb], (instregex "NIAI$")>; + +//===----------------------------------------------------------------------===// +// Atomic operations +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, EndGroup], (instregex "Serialize$")>; + +def : InstRW<[FXb, LSU, Lat5], (instregex "LAA(G)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "LAAL(G)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "LAN(G)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "LAO(G)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "LAX(G)?$")>; + +// Test and set +def : InstRW<[FXb, LSU, Lat5, EndGroup], (instregex "TS$")>; + +// Compare and swap +def : InstRW<[FXa, FXb, LSU, Lat6, GroupAlone], (instregex "CS(G|Y)?$")>; + +// Compare double and swap +def : InstRW<[FXa, FXa, FXb, FXb, FXa, LSU, Lat10, GroupAlone], + (instregex "CDS(Y)?$")>; +def : InstRW<[FXa, FXa, FXb, FXb, LSU, FXb, FXb, LSU, LSU, Lat20, GroupAlone], + (instregex "CDSG$")>; + +// Compare and swap and store +def : InstRW<[FXa, LSU, Lat30], (instregex "CSST$")>; + +// Perform locked operation +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "PLO$")>; + +// Load/store pair from/to quadword +def : InstRW<[LSU, LSU, Lat5, GroupAlone], (instregex "LPQ$")>; +def : InstRW<[FXb, FXb, LSU, Lat6, GroupAlone], (instregex "STPQ$")>; + +// Load pair disjoint +def : InstRW<[LSU, LSU, Lat5, GroupAlone], (instregex "LPD(G)?$")>; + +//===----------------------------------------------------------------------===// +// Translate and convert +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "TR$")>; +def : InstRW<[FXa, FXa, FXa, LSU, LSU, Lat30, GroupAlone], (instregex "TRT$")>; +def : InstRW<[FXa, LSU, Lat30], (instregex "TRTR$")>; +def : InstRW<[FXa, Lat30], (instregex "TR(TR)?(T)?(E|EOpt)?$")>; +def : InstRW<[LSU, Lat30], (instregex "TR(T|O)(T|O)(Opt)?$")>; +def : InstRW<[FXa, Lat30], (instregex "CU(12|14|21|24|41|42)(Opt)?$")>; +def : InstRW<[FXa, Lat30], (instregex "(CUUTF|CUTFU)(Opt)?$")>; + +//===----------------------------------------------------------------------===// +// Message-security assist +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, Lat30], (instregex "KM(C|F|O|CTR|A)?$")>; +def : InstRW<[FXa, Lat30], (instregex "(KIMD|KLMD|KMAC)$")>; +def : InstRW<[FXa, Lat30], (instregex "(PCC|PPNO|PRNO)$")>; + 
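Several of the entries just above (compare-double-and-swap, the string, translate and message-security-assist instructions) show how cracked or millicoded operations are handled: the write list enumerates an estimated number of micro-ops per pipeline, a LatN pseudo-write supplies a coarse fixed latency (often Lat30 for data-dependent operations), and GroupAlone keeps the instruction in a decode group of its own. A minimal sketch of that convention, with a made-up opcode name:

// Illustrative only (HYPOTHETICALOP is not a real SystemZ opcode): a cracked
// instruction issuing two FXa micro-ops and one LSU micro-op, modeled with a
// flat 6-cycle latency and decoded in a group by itself.
def : InstRW<[FXa, FXa, LSU, Lat6, GroupAlone], (instregex "HYPOTHETICALOP$")>;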
+//===----------------------------------------------------------------------===// +// Guarded storage +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU], (instregex "LGG$")>; +def : InstRW<[LSU, Lat5], (instregex "LLGFSG$")>; +def : InstRW<[LSU, Lat30], (instregex "(L|ST)GSC$")>; + +//===----------------------------------------------------------------------===// +// Decimal arithmetic +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, VecDF, VecDF, LSU, LSU, Lat30, GroupAlone], + (instregex "CVBG$")>; +def : InstRW<[FXb, VecDF, LSU, Lat30, GroupAlone], (instregex "CVB(Y)?$")>; +def : InstRW<[FXb, FXb, FXb, VecDF2, VecDF2, LSU, Lat30, GroupAlone], + (instregex "CVDG$")>; +def : InstRW<[FXb, VecDF, FXb, LSU, Lat30, GroupAlone], (instregex "CVD(Y)?$")>; +def : InstRW<[LSU, Lat10, GroupAlone], (instregex "MVO$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MV(N|Z)$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "(PACK|PKA|PKU)$")>; +def : InstRW<[LSU, Lat12, GroupAlone], (instregex "UNPK(A|U)$")>; +def : InstRW<[FXb, LSU, LSU, Lat9, BeginGroup], (instregex "UNPK$")>; + +def : InstRW<[FXb, VecDFX, LSU, LSU, LSU, Lat9, GroupAlone], + (instregex "(A|S|ZA)P$")>; +def : InstRW<[FXb, VecDFX2, VecDFX2, LSU, LSU, LSU, Lat30, GroupAlone], + (instregex "(M|D)P$")>; +def : InstRW<[FXb, VecDFX, VecDFX, LSU, LSU, Lat15, GroupAlone], + (instregex "SRP$")>; +def : InstRW<[VecDFX, LSU, LSU, Lat5, GroupAlone], (instregex "CP$")>; +def : InstRW<[VecDFX, LSU, Lat4, BeginGroup], (instregex "TP$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "ED(MK)?$")>; + +//===----------------------------------------------------------------------===// +// Access registers +//===----------------------------------------------------------------------===// + +// Extract/set/copy access register +def : InstRW<[LSU], (instregex "(EAR|SAR|CPYA)$")>; + +// Load address extended +def : InstRW<[LSU, FXa, Lat5, BeginGroup], (instregex "LAE(Y)?$")>; + +// Load/store access multiple (not modeled precisely) +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "(L|ST)AM(Y)?$")>; + +//===----------------------------------------------------------------------===// +// Program mask and addressing mode +//===----------------------------------------------------------------------===// + +// Insert Program Mask +def : InstRW<[FXa, Lat3, EndGroup], (instregex "IPM$")>; + +// Set Program Mask +def : InstRW<[LSU, EndGroup], (instregex "SPM$")>; + +// Branch and link +def : InstRW<[FXa, FXa, FXb, Lat5, GroupAlone], (instregex "BAL(R)?$")>; + +// Test addressing mode +def : InstRW<[FXb], (instregex "TAM$")>; + +// Set addressing mode +def : InstRW<[FXb, Lat2, EndGroup], (instregex "SAM(24|31|64)$")>; + +// Branch (and save) and set mode. 
+def : InstRW<[FXa, FXb, Lat2, GroupAlone], (instregex "BSM$")>; +def : InstRW<[FXa, FXa, FXb, Lat3, GroupAlone], (instregex "BASSM$")>; + +//===----------------------------------------------------------------------===// +// Transactional execution +//===----------------------------------------------------------------------===// + +// Transaction begin +def : InstRW<[LSU, LSU, FXb, FXb, FXb, FXb, FXb, Lat15, GroupAlone], + (instregex "TBEGIN(C|_nofloat)?$")>; + +// Transaction end +def : InstRW<[FXb, GroupAlone], (instregex "TEND$")>; + +// Transaction abort +def : InstRW<[LSU, GroupAlone], (instregex "TABORT$")>; + +// Extract Transaction Nesting Depth +def : InstRW<[FXa], (instregex "ETND$")>; + +// Nontransactional store +def : InstRW<[FXb, LSU, Lat5], (instregex "NTSTG$")>; + +//===----------------------------------------------------------------------===// +// Processor assist +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, GroupAlone], (instregex "PPA$")>; + +//===----------------------------------------------------------------------===// +// Miscellaneous Instructions. +//===----------------------------------------------------------------------===// + +// Find leftmost one +def : InstRW<[FXa, FXa, Lat4, GroupAlone], (instregex "FLOGR$")>; + +// Population count +def : InstRW<[FXa, Lat3], (instregex "POPCNT$")>; + +// Extend +def : InstRW<[FXa], (instregex "AEXT128$")>; +def : InstRW<[FXa], (instregex "ZEXT128$")>; + +// String instructions +def : InstRW<[FXa, LSU, Lat30], (instregex "SRST$")>; +def : InstRW<[FXa, Lat30], (instregex "SRSTU$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CUSE$")>; + +// Various complex instructions +def : InstRW<[LSU, Lat30], (instregex "CFC$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "UPT$")>; +def : InstRW<[LSU, Lat30], (instregex "CKSM$")>; +def : InstRW<[FXa, Lat30], (instregex "CMPSC$")>; + +// Execute +def : InstRW<[FXb, GroupAlone], (instregex "EX(RL)?$")>; + +//===----------------------------------------------------------------------===// +// .insn directive instructions +//===----------------------------------------------------------------------===// + +// An "empty" sched-class will be assigned instead of the "invalid sched-class". +// getNumDecoderSlots() will then return 1 instead of 0. 
+def : InstRW<[], (instregex "Insn.*")>; + + +// ----------------------------- Floating point ----------------------------- // + +//===----------------------------------------------------------------------===// +// FP: Select instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa], (instregex "Select(F32|F64|F128|VR128)$")>; +def : InstRW<[FXa], (instregex "CondStoreF32(Inv)?$")>; +def : InstRW<[FXa], (instregex "CondStoreF64(Inv)?$")>; + +//===----------------------------------------------------------------------===// +// FP: Move instructions +//===----------------------------------------------------------------------===// + +// Load zero +def : InstRW<[FXb], (instregex "LZ(DR|ER)$")>; +def : InstRW<[FXb, FXb, Lat2, BeginGroup], (instregex "LZXR$")>; + +// Load +def : InstRW<[VecXsPm], (instregex "LER$")>; +def : InstRW<[FXb], (instregex "LD(R|R32|GR)$")>; +def : InstRW<[FXb, Lat3], (instregex "LGDR$")>; +def : InstRW<[FXb, FXb, Lat2, GroupAlone], (instregex "LXR$")>; + +// Load and Test +def : InstRW<[VecXsPm, Lat4], (instregex "LT(D|E)BR$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "LTEBRCompare(_VecPseudo)?$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "LTDBRCompare(_VecPseudo)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "LTXBR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], + (instregex "LTXBRCompare(_VecPseudo)?$")>; + +// Copy sign +def : InstRW<[VecXsPm], (instregex "CPSDRd(d|s)$")>; +def : InstRW<[VecXsPm], (instregex "CPSDRs(d|s)$")>; + +//===----------------------------------------------------------------------===// +// FP: Load instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[VecXsPm, LSU, Lat7], (instregex "LE(Y)?$")>; +def : InstRW<[LSU], (instregex "LD(Y|E32)?$")>; +def : InstRW<[LSU], (instregex "LX$")>; + +//===----------------------------------------------------------------------===// +// FP: Store instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat7], (instregex "STD(Y)?$")>; +def : InstRW<[FXb, LSU, Lat7], (instregex "STE(Y)?$")>; +def : InstRW<[FXb, LSU, Lat5], (instregex "STX$")>; + +//===----------------------------------------------------------------------===// +// FP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[VecBF], (instregex "LEDBR(A)?$")>; +def : InstRW<[VecDF, VecDF, Lat20], (instregex "LEXBR(A)?$")>; +def : InstRW<[VecDF, VecDF, Lat20], (instregex "LDXBR(A)?$")>; + +// Load lengthened +def : InstRW<[VecBF, LSU, Lat12], (instregex "LDEB$")>; +def : InstRW<[VecBF], (instregex "LDEBR$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12 , GroupAlone], (instregex "LX(D|E)B$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "LX(D|E)BR$")>; + +// Convert from fixed / logical +def : InstRW<[FXb, VecBF, Lat9, BeginGroup], (instregex "CE(F|G)BR(A)?$")>; +def : InstRW<[FXb, VecBF, Lat9, BeginGroup], (instregex "CD(F|G)BR(A)?$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat12, GroupAlone], (instregex "CX(F|G)BR(A)?$")>; +def : InstRW<[FXb, VecBF, Lat9, BeginGroup], (instregex "CEL(F|G)BR$")>; +def : InstRW<[FXb, VecBF, Lat9, BeginGroup], (instregex "CDL(F|G)BR$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat12, GroupAlone], (instregex "CXL(F|G)BR$")>; + +// Convert to fixed / logical +def : InstRW<[FXb, VecBF, Lat11, BeginGroup], (instregex 
"CF(E|D)BR(A)?$")>; +def : InstRW<[FXb, VecBF, Lat11, BeginGroup], (instregex "CG(E|D)BR(A)?$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat20, BeginGroup], (instregex "C(F|G)XBR(A)?$")>; +def : InstRW<[FXb, VecBF, Lat11, GroupAlone], (instregex "CLFEBR$")>; +def : InstRW<[FXb, VecBF, Lat11, BeginGroup], (instregex "CLFDBR$")>; +def : InstRW<[FXb, VecBF, Lat11, BeginGroup], (instregex "CLG(E|D)BR$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat20, BeginGroup], (instregex "CL(F|G)XBR$")>; + +//===----------------------------------------------------------------------===// +// FP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load Complement / Negative / Positive +def : InstRW<[VecXsPm, Lat4], (instregex "L(C|N|P)DBR$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "L(C|N|P)EBR$")>; +def : InstRW<[FXb], (instregex "LCDFR(_32)?$")>; +def : InstRW<[FXb], (instregex "LNDFR(_32)?$")>; +def : InstRW<[FXb], (instregex "LPDFR(_32)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "L(C|N|P)XBR$")>; + +// Square root +def : InstRW<[VecFPd, LSU], (instregex "SQ(E|D)B$")>; +def : InstRW<[VecFPd], (instregex "SQ(E|D)BR$")>; +def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "SQXBR$")>; + +// Load FP integer +def : InstRW<[VecBF], (instregex "FIEBR(A)?$")>; +def : InstRW<[VecBF], (instregex "FIDBR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "FIXBR(A)?$")>; + +//===----------------------------------------------------------------------===// +// FP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : InstRW<[VecBF, LSU, Lat12], (instregex "A(E|D)B$")>; +def : InstRW<[VecBF], (instregex "A(E|D)BR$")>; +def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXBR$")>; + +// Subtraction +def : InstRW<[VecBF, LSU, Lat12], (instregex "S(E|D)B$")>; +def : InstRW<[VecBF], (instregex "S(E|D)BR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "SXBR$")>; + +// Multiply +def : InstRW<[VecBF, LSU, Lat12], (instregex "M(D|DE|EE)B$")>; +def : InstRW<[VecBF], (instregex "M(D|DE|EE)BR$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MXDB$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MXDBR$")>; +def : InstRW<[VecDF2, VecDF2, Lat20, GroupAlone], (instregex "MXBR$")>; + +// Multiply and add / subtract +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)EBR$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>; +def : InstRW<[VecBF], (instregex "M(A|S)DBR$")>; + +// Division +def : InstRW<[VecFPd, LSU], (instregex "D(E|D)B$")>; +def : InstRW<[VecFPd], (instregex "D(E|D)BR$")>; +def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "DXBR$")>; + +// Divide to integer +def : InstRW<[VecFPd, Lat30], (instregex "DI(E|D)BR$")>; + +//===----------------------------------------------------------------------===// +// FP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[VecXsPm, LSU, Lat8], (instregex "(K|C)(E|D)B$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "(K|C)(E|D)BR?$")>; +def : InstRW<[VecDF, VecDF, Lat20, GroupAlone], (instregex "(K|C)XBR$")>; + +// Test Data Class +def : InstRW<[LSU, VecXsPm, Lat9], (instregex "TC(E|D)B$")>; +def : InstRW<[LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "TCXB$")>; + 
+//===----------------------------------------------------------------------===// +// FP: Floating-point control register instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, LSU, Lat4, GroupAlone], (instregex "EFPC$")>; +def : InstRW<[FXb, LSU, Lat5, GroupAlone], (instregex "STFPC$")>; +def : InstRW<[LSU, Lat3, GroupAlone], (instregex "SFPC$")>; +def : InstRW<[LSU, LSU, Lat6, GroupAlone], (instregex "LFPC$")>; +def : InstRW<[FXa, Lat30], (instregex "SFASR$")>; +def : InstRW<[FXa, LSU, Lat30], (instregex "LFAS$")>; +def : InstRW<[FXb, Lat3, GroupAlone], (instregex "SRNM(B|T)?$")>; + + +// --------------------- Hexadecimal floating point ------------------------- // + +//===----------------------------------------------------------------------===// +// HFP: Move instructions +//===----------------------------------------------------------------------===// + +// Load and Test +def : InstRW<[VecXsPm, Lat4], (instregex "LT(D|E)R$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "LTXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[VecBF], (instregex "(LEDR|LRER)$")>; +def : InstRW<[VecBF], (instregex "LEXR$")>; +def : InstRW<[VecDF2], (instregex "(LDXR|LRDR)$")>; + +// Load lengthened +def : InstRW<[LSU], (instregex "LDE$")>; +def : InstRW<[FXb], (instregex "LDER$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "LX(D|E)$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "LX(D|E)R$")>; + +// Convert from fixed +def : InstRW<[FXb, VecBF, Lat9, BeginGroup], (instregex "CE(F|G)R$")>; +def : InstRW<[FXb, VecBF, Lat9, BeginGroup], (instregex "CD(F|G)R$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat12, GroupAlone], (instregex "CX(F|G)R$")>; + +// Convert to fixed +def : InstRW<[FXb, VecBF, Lat11, BeginGroup], (instregex "CF(E|D)R$")>; +def : InstRW<[FXb, VecBF, Lat11, BeginGroup], (instregex "CG(E|D)R$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat20, BeginGroup], (instregex "C(F|G)XR$")>; + +// Convert BFP to HFP / HFP to BFP. 
+def : InstRW<[VecBF], (instregex "THD(E)?R$")>; +def : InstRW<[VecBF], (instregex "TB(E)?DR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load Complement / Negative / Positive +def : InstRW<[VecXsPm, Lat4], (instregex "L(C|N|P)DR$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "L(C|N|P)ER$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "L(C|N|P)XR$")>; + +// Halve +def : InstRW<[VecBF], (instregex "H(E|D)R$")>; + +// Square root +def : InstRW<[VecFPd, LSU], (instregex "SQ(E|D)$")>; +def : InstRW<[VecFPd], (instregex "SQ(E|D)R$")>; +def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "SQXR$")>; + +// Load FP integer +def : InstRW<[VecBF], (instregex "FIER$")>; +def : InstRW<[VecBF], (instregex "FIDR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "FIXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : InstRW<[VecBF, LSU, Lat12], (instregex "A(E|D|U|W)$")>; +def : InstRW<[VecBF], (instregex "A(E|D|U|W)R$")>; +def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXR$")>; + +// Subtraction +def : InstRW<[VecBF, LSU, Lat12], (instregex "S(E|D|U|W)$")>; +def : InstRW<[VecBF], (instregex "S(E|D|U|W)R$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "SXR$")>; + +// Multiply +def : InstRW<[VecBF, LSU, Lat12], (instregex "M(D|DE|E|EE)$")>; +def : InstRW<[VecBF], (instregex "M(D|DE|E|EE)R$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MXD$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MXDR$")>; +def : InstRW<[VecDF2, VecDF2, Lat20, GroupAlone], (instregex "MXR$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MY$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "MY(H|L)$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MYR$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "MY(H|L)R$")>; + +// Multiply and add / subtract +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)E$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)ER$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "M(A|S)D$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "M(A|S)DR$")>; +def : InstRW<[VecBF2, LSU, Lat12, GroupAlone], (instregex "MAY(H|L)$")>; +def : InstRW<[VecBF2, VecBF2, LSU, Lat12, GroupAlone], (instregex "MAY$")>; +def : InstRW<[VecBF, GroupAlone], (instregex "MAY(H|L)R$")>; +def : InstRW<[VecBF2, VecBF2, GroupAlone], (instregex "MAYR$")>; + +// Division +def : InstRW<[VecFPd, LSU], (instregex "D(E|D)$")>; +def : InstRW<[VecFPd], (instregex "D(E|D)R$")>; +def : InstRW<[VecFPd, VecFPd, GroupAlone], (instregex "DXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[VecBF, LSU, Lat12], (instregex "C(E|D)$")>; +def : InstRW<[VecBF], (instregex "C(E|D)R$")>; +def : InstRW<[VecDF, VecDF, Lat20, GroupAlone], (instregex "CXR$")>; + + +// ------------------------ Decimal floating point -------------------------- // + +//===----------------------------------------------------------------------===// +// DFP: Move instructions 
+//===----------------------------------------------------------------------===// + +// Load and Test +def : InstRW<[VecDF], (instregex "LTDTR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "LTXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[VecDF, Lat15], (instregex "LEDTR$")>; +def : InstRW<[VecDF, VecDF, Lat20], (instregex "LDXTR$")>; + +// Load lengthened +def : InstRW<[VecDF], (instregex "LDETR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "LXDTR$")>; + +// Convert from fixed / logical +def : InstRW<[FXb, VecDF, Lat30, BeginGroup], (instregex "CD(F|G)TR(A)?$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat30, GroupAlone], (instregex "CX(F|G)TR(A)?$")>; +def : InstRW<[FXb, VecDF, Lat30, BeginGroup], (instregex "CDL(F|G)TR$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat30, GroupAlone], (instregex "CXL(F|G)TR$")>; + +// Convert to fixed / logical +def : InstRW<[FXb, VecDF, Lat30, BeginGroup], (instregex "C(F|G)DTR(A)?$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat30, BeginGroup], (instregex "C(F|G)XTR(A)?$")>; +def : InstRW<[FXb, VecDF, Lat30, BeginGroup], (instregex "CL(F|G)DTR$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat30, BeginGroup], (instregex "CL(F|G)XTR$")>; + +// Convert from / to signed / unsigned packed +def : InstRW<[FXb, VecDF, Lat9, BeginGroup], (instregex "CD(S|U)TR$")>; +def : InstRW<[FXb, FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "CX(S|U)TR$")>; +def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "C(S|U)DTR$")>; +def : InstRW<[FXb, FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "C(S|U)XTR$")>; + +// Convert from / to zoned +def : InstRW<[LSU, VecDF, Lat11, BeginGroup], (instregex "CDZT$")>; +def : InstRW<[LSU, LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "CXZT$")>; +def : InstRW<[FXb, LSU, VecDF, Lat11, BeginGroup], (instregex "CZDT$")>; +def : InstRW<[FXb, LSU, VecDF, VecDF, Lat15, GroupAlone], (instregex "CZXT$")>; + +// Convert from / to packed +def : InstRW<[LSU, VecDF, Lat11, BeginGroup], (instregex "CDPT$")>; +def : InstRW<[LSU, LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "CXPT$")>; +def : InstRW<[FXb, LSU, VecDF, Lat11, BeginGroup], (instregex "CPDT$")>; +def : InstRW<[FXb, LSU, VecDF, VecDF, Lat15, GroupAlone], (instregex "CPXT$")>; + +// Perform floating-point operation +def : InstRW<[FXb, Lat30], (instregex "PFPO$")>; + +//===----------------------------------------------------------------------===// +// DFP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load FP integer +def : InstRW<[VecDF], (instregex "FIDTR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "FIXTR$")>; + +// Extract biased exponent +def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "EEDTR$")>; +def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "EEXTR$")>; + +// Extract significance +def : InstRW<[FXb, VecDF, Lat12, BeginGroup], (instregex "ESDTR$")>; +def : InstRW<[FXb, VecDF, VecDF, Lat15, BeginGroup], (instregex "ESXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : InstRW<[VecDF], (instregex "ADTR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat10, GroupAlone], (instregex "AXTR(A)?$")>; 
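The ADTR/AXTR pair above illustrates how the extended-precision (128-bit, X-suffixed) forms are scheduled throughout these DFP sections: the 64-bit form is a single VecDF write, while the 128-bit form takes two VecDF2 writes (each of which names both Z14_VecUnit copies), an explicit LatN latency, and GroupAlone; the apparent intent is that 128-bit operations occupy the full vector/DFP capacity and a whole decode group. A new entry in the same style would look like this (the opcode name is hypothetical):

// Illustrative only: the usual pattern for an extended-precision (X) form.
def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "HYPOTHETICALXTR$")>;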
+ +// Subtraction +def : InstRW<[VecDF], (instregex "SDTR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "SXTR(A)?$")>; + +// Multiply +def : InstRW<[VecDF, Lat30], (instregex "MDTR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat30, GroupAlone], (instregex "MXTR(A)?$")>; + +// Division +def : InstRW<[VecDF, Lat30], (instregex "DDTR(A)?$")>; +def : InstRW<[VecDF2, VecDF2, Lat30, GroupAlone], (instregex "DXTR(A)?$")>; + +// Quantize +def : InstRW<[VecDF], (instregex "QADTR$")>; +def : InstRW<[VecDF2, VecDF2, Lat11, GroupAlone], (instregex "QAXTR$")>; + +// Reround +def : InstRW<[FXb, VecDF, Lat11, BeginGroup], (instregex "RRDTR$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "RRXTR$")>; + +// Shift significand left/right +def : InstRW<[LSU, VecDF, Lat11, GroupAlone], (instregex "S(L|R)DT$")>; +def : InstRW<[LSU, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "S(L|R)XT$")>; + +// Insert biased exponent +def : InstRW<[FXb, VecDF, Lat11, BeginGroup], (instregex "IEDTR$")>; +def : InstRW<[FXb, VecDF2, VecDF2, Lat15, GroupAlone], (instregex "IEXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[VecDF], (instregex "(K|C)DTR$")>; +def : InstRW<[VecDF, VecDF, Lat11, GroupAlone], (instregex "(K|C)XTR$")>; + +// Compare biased exponent +def : InstRW<[VecDF], (instregex "CEDTR$")>; +def : InstRW<[VecDF], (instregex "CEXTR$")>; + +// Test Data Class/Group +def : InstRW<[LSU, VecDF, Lat11], (instregex "TD(C|G)(E|D)T$")>; +def : InstRW<[LSU, VecDF, VecDF, Lat15, GroupAlone], (instregex "TD(C|G)XT$")>; + + +// --------------------------------- Vector --------------------------------- // + +//===----------------------------------------------------------------------===// +// Vector: Move instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb], (instregex "VLR(32|64)?$")>; +def : InstRW<[FXb, Lat4], (instregex "VLGV(B|F|G|H)?$")>; +def : InstRW<[FXb], (instregex "VLVG(B|F|G|H)?$")>; +def : InstRW<[FXb, Lat2], (instregex "VLVGP(32)?$")>; + +//===----------------------------------------------------------------------===// +// Vector: Immediate instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[VecXsPm], (instregex "VZERO$")>; +def : InstRW<[VecXsPm], (instregex "VONE$")>; +def : InstRW<[VecXsPm], (instregex "VGBM$")>; +def : InstRW<[VecXsPm], (instregex "VGM(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VREPI(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VLEI(B|F|G|H)$")>; + +//===----------------------------------------------------------------------===// +// Vector: Loads +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU], (instregex "VL(L|BB)?$")>; +def : InstRW<[LSU], (instregex "VL(32|64)$")>; +def : InstRW<[LSU], (instregex "VLLEZ(B|F|G|H|LF)?$")>; +def : InstRW<[LSU], (instregex "VLREP(B|F|G|H)?$")>; +def : InstRW<[VecXsPm, LSU, Lat7], (instregex "VLE(B|F|G|H)$")>; +def : InstRW<[FXb, LSU, VecXsPm, Lat11, BeginGroup], (instregex "VGE(F|G)$")>; +def : InstRW<[LSU, LSU, LSU, LSU, LSU, Lat10, GroupAlone], + (instregex "VLM$")>; +def : InstRW<[LSU, Lat5], (instregex "VLRL(R)?$")>; + +//===----------------------------------------------------------------------===// +// Vector: Stores 
+//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat8], (instregex "VST(L|32|64)?$")>; +def : InstRW<[FXb, LSU, Lat8], (instregex "VSTE(F|G)$")>; +def : InstRW<[FXb, LSU, VecXsPm, Lat11, BeginGroup], (instregex "VSTE(B|H)$")>; +def : InstRW<[LSU, LSU, FXb, FXb, FXb, FXb, FXb, Lat20, GroupAlone], + (instregex "VSTM$")>; +def : InstRW<[FXb, FXb, LSU, Lat12, BeginGroup], (instregex "VSCE(F|G)$")>; +def : InstRW<[FXb, LSU, Lat8], (instregex "VSTRL(R)?$")>; + +//===----------------------------------------------------------------------===// +// Vector: Selects and permutes +//===----------------------------------------------------------------------===// + +def : InstRW<[VecXsPm], (instregex "VMRH(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VMRL(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VPERM$")>; +def : InstRW<[VecXsPm], (instregex "VPDI$")>; +def : InstRW<[VecXsPm], (instregex "VBPERM$")>; +def : InstRW<[VecXsPm], (instregex "VREP(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VSEL$")>; + +//===----------------------------------------------------------------------===// +// Vector: Widening and narrowing +//===----------------------------------------------------------------------===// + +def : InstRW<[VecXsPm], (instregex "VPK(F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VPKS(F|G|H)?$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "VPKS(F|G|H)S$")>; +def : InstRW<[VecXsPm], (instregex "VPKLS(F|G|H)?$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "VPKLS(F|G|H)S$")>; +def : InstRW<[VecXsPm], (instregex "VSEG(B|F|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VUPH(B|F|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VUPL(B|F)?$")>; +def : InstRW<[VecXsPm], (instregex "VUPLH(B|F|H|W)?$")>; +def : InstRW<[VecXsPm], (instregex "VUPLL(B|F|H)?$")>; + +//===----------------------------------------------------------------------===// +// Vector: Integer arithmetic +//===----------------------------------------------------------------------===// + +def : InstRW<[VecXsPm], (instregex "VA(B|F|G|H|Q|C|CQ)?$")>; +def : InstRW<[VecXsPm], (instregex "VACC(B|F|G|H|Q|C|CQ)?$")>; +def : InstRW<[VecXsPm], (instregex "VAVG(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VAVGL(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VN(C|O|N|X)?$")>; +def : InstRW<[VecXsPm], (instregex "VO(C)?$")>; +def : InstRW<[VecMul], (instregex "VCKSM$")>; +def : InstRW<[VecXsPm], (instregex "VCLZ(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VCTZ(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VX$")>; +def : InstRW<[VecMul], (instregex "VGFM?$")>; +def : InstRW<[VecMul], (instregex "VGFMA(B|F|G|H)?$")>; +def : InstRW<[VecMul], (instregex "VGFM(B|F|G|H)$")>; +def : InstRW<[VecXsPm], (instregex "VLC(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VLP(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VMX(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VMXL(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VMN(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VMNL(B|F|G|H)?$")>; +def : InstRW<[VecMul], (instregex "VMAL(B|F)?$")>; +def : InstRW<[VecMul], (instregex "VMALE(B|F|H)?$")>; +def : InstRW<[VecMul], (instregex "VMALH(B|F|H|W)?$")>; +def : InstRW<[VecMul], (instregex "VMALO(B|F|H)?$")>; +def : InstRW<[VecMul], (instregex "VMAO(B|F|H)?$")>; +def : InstRW<[VecMul], (instregex "VMAE(B|F|H)?$")>; +def : InstRW<[VecMul], (instregex "VMAH(B|F|H)?$")>; +def : InstRW<[VecMul], (instregex "VME(B|F|H)?$")>; +def : InstRW<[VecMul], 
(instregex "VMH(B|F|H)?$")>; +def : InstRW<[VecMul], (instregex "VML(B|F)?$")>; +def : InstRW<[VecMul], (instregex "VMLE(B|F|H)?$")>; +def : InstRW<[VecMul], (instregex "VMLH(B|F|H|W)?$")>; +def : InstRW<[VecMul], (instregex "VMLO(B|F|H)?$")>; +def : InstRW<[VecMul], (instregex "VMO(B|F|H)?$")>; +def : InstRW<[VecBF2], (instregex "VMSL(G)?$")>; + +def : InstRW<[VecXsPm], (instregex "VPOPCT(B|F|G|H)?$")>; + +def : InstRW<[VecXsPm], (instregex "VERLL(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VERLLV(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VERIM(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VESL(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VESLV(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VESRA(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VESRAV(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VESRL(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VESRLV(B|F|G|H)?$")>; + +def : InstRW<[VecXsPm], (instregex "VSL(DB)?$")>; +def : InstRW<[VecXsPm], (instregex "VSLB$")>; +def : InstRW<[VecXsPm], (instregex "VSR(A|L)$")>; +def : InstRW<[VecXsPm], (instregex "VSR(A|L)B$")>; + +def : InstRW<[VecXsPm], (instregex "VSB(I|IQ|CBI|CBIQ)?$")>; +def : InstRW<[VecXsPm], (instregex "VSCBI(B|F|G|H|Q)?$")>; +def : InstRW<[VecXsPm], (instregex "VS(F|G|H|Q)?$")>; + +def : InstRW<[VecMul], (instregex "VSUM(B|H)?$")>; +def : InstRW<[VecMul], (instregex "VSUMG(F|H)?$")>; +def : InstRW<[VecMul], (instregex "VSUMQ(F|G)?$")>; + +//===----------------------------------------------------------------------===// +// Vector: Integer comparison +//===----------------------------------------------------------------------===// + +def : InstRW<[VecXsPm, Lat4], (instregex "VEC(B|F|G|H)?$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "VECL(B|F|G|H)?$")>; +def : InstRW<[VecXsPm], (instregex "VCEQ(B|F|G|H)?$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "VCEQ(B|F|G|H)S$")>; +def : InstRW<[VecXsPm], (instregex "VCH(B|F|G|H)?$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "VCH(B|F|G|H)S$")>; +def : InstRW<[VecXsPm], (instregex "VCHL(B|F|G|H)?$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "VCHL(B|F|G|H)S$")>; +def : InstRW<[VecStr, Lat5], (instregex "VTM$")>; + +//===----------------------------------------------------------------------===// +// Vector: Floating-point arithmetic +//===----------------------------------------------------------------------===// + +// Conversion and rounding +def : InstRW<[VecBF], (instregex "VCD(L)?G$")>; +def : InstRW<[VecBF], (instregex "VCD(L)?GB$")>; +def : InstRW<[VecBF], (instregex "WCD(L)?GB$")>; +def : InstRW<[VecBF], (instregex "VC(L)?GD$")>; +def : InstRW<[VecBF], (instregex "VC(L)?GDB$")>; +def : InstRW<[VecBF], (instregex "WC(L)?GDB$")>; +def : InstRW<[VecBF], (instregex "VL(DE|ED)$")>; +def : InstRW<[VecBF], (instregex "VL(DE|ED)B$")>; +def : InstRW<[VecBF], (instregex "WL(DE|ED)B$")>; +def : InstRW<[VecBF], (instregex "VFL(L|R)$")>; +def : InstRW<[VecBF], (instregex "VFL(LS|RD)$")>; +def : InstRW<[VecBF], (instregex "WFL(LS|RD)$")>; +def : InstRW<[VecBF2], (instregex "WFLLD$")>; +def : InstRW<[VecDF2, Lat10], (instregex "WFLRX$")>; +def : InstRW<[VecBF2], (instregex "VFI$")>; +def : InstRW<[VecBF], (instregex "VFIDB$")>; +def : InstRW<[VecBF], (instregex "WFIDB$")>; +def : InstRW<[VecBF2], (instregex "VFISB$")>; +def : InstRW<[VecBF], (instregex "WFISB$")>; +def : InstRW<[VecDF2, Lat10], (instregex "WFIXB$")>; + +// Sign operations +def : InstRW<[VecXsPm], (instregex "VFPSO$")>; +def : InstRW<[VecXsPm], (instregex "(V|W)FPSODB$")>; 
+def : InstRW<[VecXsPm], (instregex "(V|W)FPSOSB$")>; +def : InstRW<[VecXsPm], (instregex "WFPSOXB$")>; +def : InstRW<[VecXsPm], (instregex "(V|W)FL(C|N|P)DB$")>; +def : InstRW<[VecXsPm], (instregex "(V|W)FL(C|N|P)SB$")>; +def : InstRW<[VecXsPm], (instregex "WFL(C|N|P)XB$")>; + +// Minimum / maximum +def : InstRW<[VecXsPm], (instregex "VF(MAX|MIN)$")>; +def : InstRW<[VecXsPm], (instregex "VF(MAX|MIN)DB$")>; +def : InstRW<[VecXsPm], (instregex "WF(MAX|MIN)DB$")>; +def : InstRW<[VecXsPm], (instregex "VF(MAX|MIN)SB$")>; +def : InstRW<[VecXsPm], (instregex "WF(MAX|MIN)SB$")>; +def : InstRW<[VecDFX], (instregex "WF(MAX|MIN)XB$")>; + +// Test data class +def : InstRW<[VecXsPm, Lat4], (instregex "VFTCI$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "(V|W)FTCIDB$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "(V|W)FTCISB$")>; +def : InstRW<[VecDFX, Lat4], (instregex "WFTCIXB$")>; + +// Add / subtract +def : InstRW<[VecBF2], (instregex "VF(A|S)$")>; +def : InstRW<[VecBF], (instregex "VF(A|S)DB$")>; +def : InstRW<[VecBF], (instregex "WF(A|S)DB$")>; +def : InstRW<[VecBF2], (instregex "VF(A|S)SB$")>; +def : InstRW<[VecBF], (instregex "WF(A|S)SB$")>; +def : InstRW<[VecDF2, Lat10], (instregex "WF(A|S)XB$")>; + +// Multiply / multiply-and-add/subtract +def : InstRW<[VecBF2], (instregex "VFM$")>; +def : InstRW<[VecBF], (instregex "VFMDB$")>; +def : InstRW<[VecBF], (instregex "WFMDB$")>; +def : InstRW<[VecBF2], (instregex "VFMSB$")>; +def : InstRW<[VecBF], (instregex "WFMSB$")>; +def : InstRW<[VecDF2, Lat20], (instregex "WFMXB$")>; +def : InstRW<[VecBF2], (instregex "VF(N)?M(A|S)$")>; +def : InstRW<[VecBF], (instregex "VF(N)?M(A|S)DB$")>; +def : InstRW<[VecBF], (instregex "WF(N)?M(A|S)DB$")>; +def : InstRW<[VecBF2], (instregex "VF(N)?M(A|S)SB$")>; +def : InstRW<[VecBF], (instregex "WF(N)?M(A|S)SB$")>; +def : InstRW<[VecDF2, Lat20], (instregex "WF(N)?M(A|S)XB$")>; + +// Divide / square root +def : InstRW<[VecFPd], (instregex "VFD$")>; +def : InstRW<[VecFPd], (instregex "(V|W)FDDB$")>; +def : InstRW<[VecFPd], (instregex "(V|W)FDSB$")>; +def : InstRW<[VecFPd], (instregex "WFDXB$")>; +def : InstRW<[VecFPd], (instregex "VFSQ$")>; +def : InstRW<[VecFPd], (instregex "(V|W)FSQDB$")>; +def : InstRW<[VecFPd], (instregex "(V|W)FSQSB$")>; +def : InstRW<[VecFPd], (instregex "WFSQXB$")>; + +//===----------------------------------------------------------------------===// +// Vector: Floating-point comparison +//===----------------------------------------------------------------------===// + +def : InstRW<[VecXsPm], (instregex "VF(C|K)(E|H|HE)$")>; +def : InstRW<[VecXsPm], (instregex "VF(C|K)(E|H|HE)DB$")>; +def : InstRW<[VecXsPm], (instregex "WF(C|K)(E|H|HE)DB$")>; +def : InstRW<[VecXsPm], (instregex "VF(C|K)(E|H|HE)SB$")>; +def : InstRW<[VecXsPm], (instregex "WF(C|K)(E|H|HE)SB$")>; +def : InstRW<[VecDFX], (instregex "WF(C|K)(E|H|HE)XB$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "VF(C|K)(E|H|HE)DBS$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)(E|H|HE)DBS$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "VF(C|K)(E|H|HE)SBS$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)(E|H|HE)SBS$")>; +def : InstRW<[VecDFX, Lat4], (instregex "WF(C|K)(E|H|HE)XBS$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)DB$")>; +def : InstRW<[VecXsPm, Lat4], (instregex "WF(C|K)SB$")>; +def : InstRW<[VecDFX, Lat4], (instregex "WF(C|K)XB$")>; + +//===----------------------------------------------------------------------===// +// Vector: Floating-point insertion and 
extraction +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb], (instregex "LEFR$")>; +def : InstRW<[FXb, Lat4], (instregex "LFER$")>; + +//===----------------------------------------------------------------------===// +// Vector: String instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[VecStr], (instregex "VFAE(B)?$")>; +def : InstRW<[VecStr, Lat5], (instregex "VFAEBS$")>; +def : InstRW<[VecStr], (instregex "VFAE(F|H)$")>; +def : InstRW<[VecStr, Lat5], (instregex "VFAE(F|H)S$")>; +def : InstRW<[VecStr], (instregex "VFAEZ(B|F|H)$")>; +def : InstRW<[VecStr, Lat5], (instregex "VFAEZ(B|F|H)S$")>; +def : InstRW<[VecStr], (instregex "VFEE(B|F|H|ZB|ZF|ZH)?$")>; +def : InstRW<[VecStr, Lat5], (instregex "VFEE(B|F|H|ZB|ZF|ZH)S$")>; +def : InstRW<[VecStr], (instregex "VFENE(B|F|H|ZB|ZF|ZH)?$")>; +def : InstRW<[VecStr, Lat5], (instregex "VFENE(B|F|H|ZB|ZF|ZH)S$")>; +def : InstRW<[VecStr], (instregex "VISTR(B|F|H)?$")>; +def : InstRW<[VecStr, Lat5], (instregex "VISTR(B|F|H)S$")>; +def : InstRW<[VecStr], (instregex "VSTRC(B|F|H)?$")>; +def : InstRW<[VecStr, Lat5], (instregex "VSTRC(B|F|H)S$")>; +def : InstRW<[VecStr], (instregex "VSTRCZ(B|F|H)$")>; +def : InstRW<[VecStr, Lat5], (instregex "VSTRCZ(B|F|H)S$")>; + +//===----------------------------------------------------------------------===// +// Vector: Packed-decimal instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[VecDF, VecDF, Lat10], (instregex "VLIP$")>; +def : InstRW<[VecDFX, LSU, GroupAlone], (instregex "VPKZ$")>; +def : InstRW<[VecDFX, FXb, LSU, Lat12, BeginGroup], (instregex "VUPKZ$")>; +def : InstRW<[VecDF, VecDF, FXb, Lat20, GroupAlone], (instregex "VCVB(G)?$")>; +def : InstRW<[VecDF, VecDF, FXb, Lat20, GroupAlone], (instregex "VCVD(G)?$")>; +def : InstRW<[VecDFX], (instregex "V(A|S)P$")>; +def : InstRW<[VecDF, VecDF, Lat30, GroupAlone], (instregex "VM(S)?P$")>; +def : InstRW<[VecDF, VecDF, Lat30, GroupAlone], (instregex "V(D|R)P$")>; +def : InstRW<[VecDFX, Lat30, GroupAlone], (instregex "VSDP$")>; +def : InstRW<[VecDF, VecDF, Lat11], (instregex "VSRP$")>; +def : InstRW<[VecDFX], (instregex "VPSOP$")>; +def : InstRW<[VecDFX], (instregex "V(T|C)P$")>; + + +// -------------------------------- System ---------------------------------- // + +//===----------------------------------------------------------------------===// +// System: Program-Status Word Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "EPSW$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "LPSW(E)?$")>; +def : InstRW<[FXa, Lat3, GroupAlone], (instregex "IPK$")>; +def : InstRW<[LSU, EndGroup], (instregex "SPKA$")>; +def : InstRW<[LSU, EndGroup], (instregex "SSM$")>; +def : InstRW<[FXb, LSU, GroupAlone], (instregex "ST(N|O)SM$")>; +def : InstRW<[FXa, Lat3], (instregex "IAC$")>; +def : InstRW<[LSU, EndGroup], (instregex "SAC(F)?$")>; + +//===----------------------------------------------------------------------===// +// System: Control Register Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "LCTL(G)?$")>; +def : InstRW<[LSU, Lat30], (instregex "STCT(L|G)$")>; +def : InstRW<[LSU], (instregex "E(P|S)A(I)?R$")>; +def : InstRW<[FXb, Lat30], (instregex "SSA(I)?R$")>; +def : InstRW<[FXb, Lat30], (instregex "ESEA$")>; + 
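Each per-subtarget file wraps its entries in its own `let SchedModel = ... in { ... }` block (the closing brace is visible at the end of each file in this diff), so the same opcode pattern can be assigned different resources and latencies per CPU: LTDTR, for example, is modeled on the vector DFP unit (VecDF) in the file above but on DFU with Lat20 in the Z196 file further down. A sketch of that scoping, with assumed model and write names:

  // MyModelA/MyModelB and the write names are assumed, defined elsewhere
  // as in the earlier sketch; only the scoping is being illustrated.
  let SchedModel = MyModelA in {
    def : InstRW<[MyVecDF], (instregex "LTDTR$")>;
  }
  let SchedModel = MyModelB in {
    def : InstRW<[MyDFU, MyLat20], (instregex "LTDTR$")>;
  }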
+//===----------------------------------------------------------------------===// +// System: Prefix-Register Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "SPX$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STPX$")>; + +//===----------------------------------------------------------------------===// +// System: Storage-Key and Real Memory Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "ISKE$")>; +def : InstRW<[FXb, Lat30], (instregex "IVSK$")>; +def : InstRW<[FXb, Lat30], (instregex "SSKE(Opt)?$")>; +def : InstRW<[FXb, Lat30], (instregex "RRB(E|M)$")>; +def : InstRW<[FXb, Lat30], (instregex "IRBM$")>; +def : InstRW<[FXb, Lat30], (instregex "PFMF$")>; +def : InstRW<[FXb, Lat30], (instregex "TB$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "PGIN$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "PGOUT$")>; + +//===----------------------------------------------------------------------===// +// System: Dynamic-Address-Translation Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "IPTE(Opt)?(Opt)?$")>; +def : InstRW<[FXb, Lat30], (instregex "IDTE(Opt)?$")>; +def : InstRW<[FXb, Lat30], (instregex "CRDTE(Opt)?$")>; +def : InstRW<[FXb, Lat30], (instregex "PTLB$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "CSP(G)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "LPTEA$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "LRA(Y|G)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STRAG$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "LURA(G)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STUR(A|G)$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "TPROT$")>; + +//===----------------------------------------------------------------------===// +// System: Memory-move Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXa, FXa, FXb, LSU, Lat8, GroupAlone], (instregex "MVC(K|P|S)$")>; +def : InstRW<[FXa, LSU, Lat6, GroupAlone], (instregex "MVC(S|D)K$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "MVCOS$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVPG$")>; + +//===----------------------------------------------------------------------===// +// System: Address-Space Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "LASP$")>; +def : InstRW<[LSU, GroupAlone], (instregex "PALB$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "PC$")>; +def : InstRW<[FXb, Lat30], (instregex "PR$")>; +def : InstRW<[FXb, Lat30], (instregex "PT(I)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "RP$")>; +def : InstRW<[FXb, Lat30], (instregex "BS(G|A)$")>; +def : InstRW<[FXb, Lat20], (instregex "TAR$")>; + +//===----------------------------------------------------------------------===// +// System: Linkage-Stack Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30, EndGroup], (instregex "BAKR$")>; +def : InstRW<[FXb, Lat30], (instregex "EREG(G)?$")>; +def : InstRW<[FXb, Lat30], (instregex "(E|M)STA$")>; + +//===----------------------------------------------------------------------===// +// System: Time-Related Instructions +//===----------------------------------------------------------------------===// + +def : 
InstRW<[FXb, Lat30], (instregex "PTFF$")>; +def : InstRW<[FXb, LSU, Lat20], (instregex "SCK$")>; +def : InstRW<[FXb, Lat30], (instregex "SCKPF$")>; +def : InstRW<[FXb, LSU, Lat20], (instregex "SCKC$")>; +def : InstRW<[LSU, LSU, GroupAlone], (instregex "SPT$")>; +def : InstRW<[LSU, LSU, LSU, FXa, FXa, FXb, Lat9, GroupAlone], + (instregex "STCK(F)?$")>; +def : InstRW<[LSU, LSU, LSU, LSU, FXa, FXa, FXb, FXb, Lat11, GroupAlone], + (instregex "STCKE$")>; +def : InstRW<[FXb, LSU, Lat9], (instregex "STCKC$")>; +def : InstRW<[LSU, LSU, FXb, Lat5, BeginGroup], (instregex "STPT$")>; + +//===----------------------------------------------------------------------===// +// System: CPU-Related Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, LSU, Lat30], (instregex "STAP$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STIDP$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STSI$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STFL(E)?$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "ECAG$")>; +def : InstRW<[FXa, LSU, Lat30], (instregex "ECTG$")>; +def : InstRW<[FXb, Lat30], (instregex "PTF$")>; +def : InstRW<[FXb, Lat30], (instregex "PCKMO$")>; + +//===----------------------------------------------------------------------===// +// System: Miscellaneous Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "SVC$")>; +def : InstRW<[FXb, GroupAlone], (instregex "MC$")>; +def : InstRW<[FXb, Lat30], (instregex "DIAG$")>; +def : InstRW<[FXb], (instregex "TRAC(E|G)$")>; +def : InstRW<[FXb, Lat30], (instregex "TRAP(2|4)$")>; +def : InstRW<[FXb, Lat30], (instregex "SIGP$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "SIGA$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "SIE$")>; + +//===----------------------------------------------------------------------===// +// System: CPU-Measurement Facility Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb], (instregex "LPP$")>; +def : InstRW<[FXb, Lat30], (instregex "ECPGA$")>; +def : InstRW<[FXb, Lat30], (instregex "E(C|P)CTR$")>; +def : InstRW<[FXb, Lat30], (instregex "LCCTL$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "L(P|S)CTL$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "Q(S|CTR)I$")>; +def : InstRW<[FXb, Lat30], (instregex "S(C|P)CTR$")>; + +//===----------------------------------------------------------------------===// +// System: I/O Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXb, Lat30], (instregex "(C|H|R|X)SCH$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "(M|S|ST|T)SCH$")>; +def : InstRW<[FXb, Lat30], (instregex "RCHP$")>; +def : InstRW<[FXb, Lat30], (instregex "SCHM$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "STC(PS|RW)$")>; +def : InstRW<[FXb, LSU, Lat30], (instregex "TPI$")>; +def : InstRW<[FXb, Lat30], (instregex "SAL$")>; + +} + diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ196.td b/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ196.td index a950e54..4d986e8 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ196.td +++ b/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZ196.td @@ -59,6 +59,7 @@ def : WriteRes<Lat30, []> { let Latency = 30; let NumMicroOps = 0;} def Z196_FXUnit : ProcResource<2>; def Z196_LSUnit : ProcResource<2>; def Z196_FPUnit : ProcResource<1>; +def Z196_DFUnit : ProcResource<1>; // Subtarget specific 
definitions of scheduling resources. def : WriteRes<FXU, [Z196_FXUnit]> { let Latency = 1; } @@ -66,6 +67,8 @@ def : WriteRes<LSU, [Z196_LSUnit]> { let Latency = 4; } def : WriteRes<LSU_lat1, [Z196_LSUnit]> { let Latency = 1; } def : WriteRes<FPU, [Z196_FPUnit]> { let Latency = 8; } def : WriteRes<FPU2, [Z196_FPUnit, Z196_FPUnit]> { let Latency = 9; } +def : WriteRes<DFU, [Z196_DFUnit]> { let Latency = 2; } +def : WriteRes<DFU2, [Z196_DFUnit, Z196_DFUnit]> { let Latency = 3; } // -------------------------- INSTRUCTIONS ---------------------------------- // @@ -152,6 +155,7 @@ def : InstRW<[FXU, LSU, Lat5], (instregex "MVI(Y)?$")>; // Move character def : InstRW<[LSU, LSU, LSU, FXU, Lat8, GroupAlone], (instregex "MVC$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCL(E|U)?$")>; // Pseudo -> reg move def : InstRW<[FXU], (instregex "COPY(_TO_REGCLASS)?$")>; @@ -226,6 +230,7 @@ def : InstRW<[LSU], (instregex "LLG(C|F|H|T|FRL|HRL)$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "STC(H|Y|Mux)?$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "STH(H|Y|RL|Mux)?$")>; +def : InstRW<[FXU, LSU, Lat5], (instregex "STCM(H|Y)?$")>; //===----------------------------------------------------------------------===// // Multi-register moves @@ -235,6 +240,9 @@ def : InstRW<[FXU, LSU, Lat5], (instregex "STH(H|Y|RL|Mux)?$")>; def : InstRW<[LSU, LSU, LSU, LSU, LSU, Lat10, GroupAlone], (instregex "LM(H|Y|G)?$")>; +// Load multiple disjoint +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "LMD$")>; + // Store multiple (estimated average of 3 ops) def : InstRW<[LSU, LSU, FXU, FXU, FXU, Lat10, GroupAlone], (instregex "STM(H|Y|G)?$")>; @@ -246,6 +254,7 @@ def : InstRW<[LSU, LSU, FXU, FXU, FXU, Lat10, GroupAlone], def : InstRW<[FXU], (instregex "LRV(G)?R$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "LRV(G|H)?$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "STRV(G|H)?$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCIN$")>; //===----------------------------------------------------------------------===// // Load address instructions @@ -285,7 +294,7 @@ def : InstRW<[FXU], (instregex "IILL(64)?$")>; // Addition //===----------------------------------------------------------------------===// -def : InstRW<[FXU, LSU, Lat5], (instregex "A(Y|SI)?$")>; +def : InstRW<[FXU, LSU, Lat5], (instregex "A(L)?(Y|SI)?$")>; def : InstRW<[FXU, FXU, LSU, Lat6, GroupAlone], (instregex "AH(Y)?$")>; def : InstRW<[FXU], (instregex "AIH$")>; def : InstRW<[FXU], (instregex "AFI(Mux)?$")>; @@ -294,15 +303,17 @@ def : InstRW<[FXU], (instregex "AGHI(K)?$")>; def : InstRW<[FXU], (instregex "AGR(K)?$")>; def : InstRW<[FXU], (instregex "AHI(K)?$")>; def : InstRW<[FXU], (instregex "AHIMux(K)?$")>; -def : InstRW<[FXU, LSU, Lat5], (instregex "AL(Y)?$")>; def : InstRW<[FXU], (instregex "AL(FI|HSIK)$")>; -def : InstRW<[FXU, LSU, Lat5], (instregex "ALG(F)?$")>; +def : InstRW<[FXU, LSU, Lat5], (instregex "ALGF$")>; def : InstRW<[FXU], (instregex "ALGHSIK$")>; def : InstRW<[FXU], (instregex "ALGF(I|R)$")>; def : InstRW<[FXU], (instregex "ALGR(K)?$")>; def : InstRW<[FXU], (instregex "ALR(K)?$")>; def : InstRW<[FXU], (instregex "AR(K)?$")>; -def : InstRW<[FXU, LSU, Lat5], (instregex "AG(SI)?$")>; +def : InstRW<[FXU], (instregex "A(L)?HHHR$")>; +def : InstRW<[FXU, FXU, Lat3, GroupAlone], (instregex "A(L)?HHLR$")>; +def : InstRW<[FXU], (instregex "ALSIH(N)?$")>; +def : InstRW<[FXU, LSU, Lat5], (instregex "A(L)?G(SI)?$")>; // Logical addition with carry def : InstRW<[FXU, LSU, Lat7, GroupAlone], (instregex "ALC(G)?$")>; @@ -325,6 +336,8 @@ 
def : InstRW<[FXU], (instregex "SLGF(I|R)$")>; def : InstRW<[FXU], (instregex "SLGR(K)?$")>; def : InstRW<[FXU], (instregex "SLR(K)?$")>; def : InstRW<[FXU], (instregex "SR(K)?$")>; +def : InstRW<[FXU], (instregex "S(L)?HHHR$")>; +def : InstRW<[FXU, FXU, Lat3, GroupAlone], (instregex "S(L)?HHLR$")>; // Subtraction with borrow def : InstRW<[FXU, LSU, Lat7, GroupAlone], (instregex "SLB(G)?$")>; @@ -390,16 +403,22 @@ def : InstRW<[FXU, Lat6], (instregex "MS(R|FI)$")>; def : InstRW<[FXU, LSU, Lat12], (instregex "MSG$")>; def : InstRW<[FXU, Lat8], (instregex "MSGR$")>; def : InstRW<[FXU, Lat6], (instregex "MSGF(I|R)$")>; -def : InstRW<[FXU, LSU, Lat15, GroupAlone], (instregex "MLG$")>; -def : InstRW<[FXU, Lat9, GroupAlone], (instregex "MLGR$")>; +def : InstRW<[FXU, FXU, LSU, Lat15, GroupAlone], (instregex "MLG$")>; +def : InstRW<[FXU, FXU, Lat9, GroupAlone], (instregex "MLGR$")>; def : InstRW<[FXU, Lat5], (instregex "MGHI$")>; def : InstRW<[FXU, Lat5], (instregex "MHI$")>; def : InstRW<[FXU, LSU, Lat9], (instregex "MH(Y)?$")>; +def : InstRW<[FXU, FXU, Lat7, GroupAlone], (instregex "M(L)?R$")>; +def : InstRW<[FXU, FXU, LSU, Lat7, GroupAlone], (instregex "M(FY|L)?$")>; //===----------------------------------------------------------------------===// // Division and remainder //===----------------------------------------------------------------------===// +def : InstRW<[FPU2, FPU2, FXU, FXU, FXU, FXU, FXU, Lat30, GroupAlone], + (instregex "DR$")>; +def : InstRW<[FPU2, FPU2, LSU, FXU, FXU, FXU, FXU, Lat30, GroupAlone], + (instregex "D$")>; def : InstRW<[FPU2, FPU2, FXU, FXU, FXU, FXU, Lat30, GroupAlone], (instregex "DSG(F)?R$")>; def : InstRW<[FPU2, FPU2, LSU, FXU, FXU, FXU, Lat30, GroupAlone], @@ -416,7 +435,9 @@ def : InstRW<[FPU2, FPU2, LSU, FXU, FXU, FXU, FXU, Lat30, GroupAlone], def : InstRW<[FXU], (instregex "SLL(G|K)?$")>; def : InstRW<[FXU], (instregex "SRL(G|K)?$")>; def : InstRW<[FXU], (instregex "SRA(G|K)?$")>; -def : InstRW<[FXU, Lat2], (instregex "SLA(K)?$")>; +def : InstRW<[FXU, Lat2], (instregex "SLA(G|K)?$")>; +def : InstRW<[FXU, FXU, FXU, FXU, LSU, Lat8, GroupAlone], + (instregex "S(L|R)D(A|L)$")>; // Rotate def : InstRW<[FXU, LSU, Lat6], (instregex "RLL(G)?$")>; @@ -453,6 +474,8 @@ def : InstRW<[FXU], (instregex "CLIH$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "CLI(Y)?$")>; def : InstRW<[FXU], (instregex "CLR$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "CLRL$")>; +def : InstRW<[FXU], (instregex "C(L)?HHR$")>; +def : InstRW<[FXU, FXU, Lat3, GroupAlone], (instregex "C(L)?HLR$")>; // Compare halfword def : InstRW<[FXU, LSU, FXU, Lat6, GroupAlone], (instregex "CH(Y|RL)?$")>; @@ -465,7 +488,7 @@ def : InstRW<[FXU, FXU, Lat2, GroupAlone], (instregex "CGFR$")>; // Compare logical character def : InstRW<[LSU, LSU, FXU, Lat9, GroupAlone], (instregex "CLC$")>; - +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CLCL(E|U)?$")>; def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CLST$")>; // Test under mask @@ -476,6 +499,9 @@ def : InstRW<[FXU], (instregex "TMHL(64)?$")>; def : InstRW<[FXU], (instregex "TMLH(64)?$")>; def : InstRW<[FXU], (instregex "TMLL(64)?$")>; +// Compare logical characters under mask +def : InstRW<[FXU, FXU, LSU, Lat5, GroupAlone], (instregex "CLM(H|Y)?$")>; + //===----------------------------------------------------------------------===// // Prefetch //===----------------------------------------------------------------------===// @@ -507,7 +533,7 @@ def : InstRW<[FXU, FXU, FXU, FXU, FXU, FXU, LSU, LSU, Lat12, GroupAlone], (instregex "CDSG$")>; // Compare and 
swap and store -def : InstRW<[FXU, Lat30, GroupAlone], (instregex "CSST$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "CSST$")>; // Perform locked operation def : InstRW<[LSU, Lat30, GroupAlone], (instregex "PLO$")>; @@ -520,6 +546,50 @@ def : InstRW<[FXU, FXU, LSU, LSU, Lat6, GroupAlone], (instregex "STPQ$")>; def : InstRW<[LSU, LSU, Lat5, GroupAlone], (instregex "LPD(G)?$")>; //===----------------------------------------------------------------------===// +// Translate and convert +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "TR$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "TRT$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "TRTR$")>; +def : InstRW<[FXU, Lat30], (instregex "TR(TR)?(T)?(E|EOpt)?$")>; +def : InstRW<[LSU, Lat30], (instregex "TR(T|O)(T|O)(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "CU(12|14|21|24|41|42)(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "(CUUTF|CUTFU)(Opt)?$")>; + +//===----------------------------------------------------------------------===// +// Message-security assist +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "KM(C|F|O|CTR)?$")>; +def : InstRW<[FXU, Lat30], (instregex "(KIMD|KLMD|KMAC|PCC)$")>; + +//===----------------------------------------------------------------------===// +// Decimal arithmetic +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, DFU2, LSU, LSU, Lat30, GroupAlone], (instregex "CVBG$")>; +def : InstRW<[FXU, DFU, LSU, Lat30, GroupAlone], (instregex "CVB(Y)?$")>; +def : InstRW<[FXU, FXU, FXU, DFU2, DFU2, LSU, Lat30, GroupAlone], + (instregex "CVDG$")>; +def : InstRW<[FXU, FXU, DFU, LSU, Lat30, GroupAlone], (instregex "CVD(Y)?$")>; +def : InstRW<[LSU, Lat10, GroupAlone], (instregex "MVO$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MV(N|Z)$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "(PACK|PKA|PKU)$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "UNPK$")>; +def : InstRW<[LSU, Lat12, GroupAlone], (instregex "UNPK(A|U)$")>; + +def : InstRW<[FXU, DFU2, DFU2, LSU, LSU, Lat15, GroupAlone], + (instregex "(A|S|ZA)P$")>; +def : InstRW<[FXU, DFU2, DFU2, LSU, LSU, Lat30, GroupAlone], + (instregex "(M|D)P$")>; +def : InstRW<[FXU, FXU, DFU2, DFU2, LSU, LSU, LSU, Lat15, GroupAlone], + (instregex "SRP$")>; +def : InstRW<[DFU2, DFU2, LSU, LSU, Lat11, GroupAlone], (instregex "CP$")>; +def : InstRW<[DFU2, LSU, LSU, GroupAlone], (instregex "TP$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "ED(MK)?$")>; + +//===----------------------------------------------------------------------===// // Access registers //===----------------------------------------------------------------------===// @@ -560,40 +630,29 @@ def : InstRW<[FXU, FXU, LSU, Lat6, GroupAlone], (instregex "BASSM$")>; //===----------------------------------------------------------------------===// // Find leftmost one -def : InstRW<[FXU, Lat7, GroupAlone], (instregex "FLOGR$")>; +def : InstRW<[FXU, FXU, Lat7, GroupAlone], (instregex "FLOGR$")>; // Population count def : InstRW<[FXU, Lat3], (instregex "POPCNT$")>; // Extend -def : InstRW<[FXU], (instregex "AEXT128_64$")>; -def : InstRW<[FXU], (instregex "ZEXT128_(32|64)$")>; +def : InstRW<[FXU], (instregex "AEXT128$")>; +def : InstRW<[FXU], (instregex "ZEXT128$")>; // String instructions def : InstRW<[FXU, LSU, Lat30], (instregex "SRST$")>; +def : InstRW<[FXU, Lat30], 
(instregex "SRSTU$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CUSE$")>; -// Move with key -def : InstRW<[LSU, Lat8, GroupAlone], (instregex "MVCK$")>; - -// Extract CPU Time -def : InstRW<[FXU, Lat5, LSU], (instregex "ECTG$")>; +// Various complex instructions +def : InstRW<[LSU, Lat30], (instregex "CFC$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "UPT$")>; +def : InstRW<[LSU, Lat30], (instregex "CKSM$")>; +def : InstRW<[FXU, Lat30], (instregex "CMPSC$")>; // Execute def : InstRW<[LSU, GroupAlone], (instregex "EX(RL)?$")>; -// Program return -def : InstRW<[FXU, Lat30], (instregex "PR$")>; - -// Inline assembly -def : InstRW<[FXU, LSU, Lat15], (instregex "STCK$")>; -def : InstRW<[FXU, LSU, Lat12], (instregex "STCKF$")>; -def : InstRW<[LSU, FXU, Lat5], (instregex "STCKE$")>; -def : InstRW<[FXU, LSU, Lat5], (instregex "STFLE$")>; -def : InstRW<[FXU, Lat30], (instregex "SVC$")>; - -// Store real address -def : InstRW<[FXU, LSU, Lat5], (instregex "STRAG$")>; - //===----------------------------------------------------------------------===// // .insn directive instructions //===----------------------------------------------------------------------===// @@ -730,9 +789,9 @@ def : InstRW<[FPU2, FPU2, Lat10, GroupAlone], (instregex "MXDBR$")>; def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "MXBR$")>; // Multiply and add / subtract -def : InstRW<[FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>; +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>; def : InstRW<[FPU, GroupAlone], (instregex "M(A|S)EBR$")>; -def : InstRW<[FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>; +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>; def : InstRW<[FPU, GroupAlone], (instregex "M(A|S)DBR$")>; // Division @@ -740,14 +799,17 @@ def : InstRW<[FPU, LSU, Lat30], (instregex "D(E|D)B$")>; def : InstRW<[FPU, Lat30], (instregex "D(E|D)BR$")>; def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "DXBR$")>; +// Divide to integer +def : InstRW<[FPU, Lat30], (instregex "DI(E|D)BR$")>; + //===----------------------------------------------------------------------===// // FP: Comparisons //===----------------------------------------------------------------------===// // Compare -def : InstRW<[FPU, LSU, Lat12], (instregex "C(E|D)B$")>; -def : InstRW<[FPU], (instregex "C(E|D)BR$")>; -def : InstRW<[FPU, FPU, Lat30], (instregex "CXBR$")>; +def : InstRW<[FPU, LSU, Lat12], (instregex "(K|C)(E|D)B$")>; +def : InstRW<[FPU], (instregex "(K|C)(E|D)BR$")>; +def : InstRW<[FPU, FPU, Lat30], (instregex "(K|C)XBR$")>; // Test Data Class def : InstRW<[FPU, LSU, Lat15], (instregex "TC(E|D)B$")>; @@ -760,10 +822,396 @@ def : InstRW<[FPU2, FPU2, LSU, Lat15, GroupAlone], (instregex "TCXB$")>; def : InstRW<[FXU, LSU, Lat4, GroupAlone], (instregex "EFPC$")>; def : InstRW<[LSU, Lat3, GroupAlone], (instregex "SFPC$")>; def : InstRW<[LSU, LSU, Lat6, GroupAlone], (instregex "LFPC$")>; -def : InstRW<[LSU, Lat3, GroupAlone], (instregex "STFPC$")>; -def : InstRW<[FXU, Lat30, GroupAlone], (instregex "SFASR$")>; -def : InstRW<[FXU, LSU, Lat30, GroupAlone], (instregex "LFAS$")>; +def : InstRW<[FXU, LSU, Lat3, GroupAlone], (instregex "STFPC$")>; +def : InstRW<[FXU, Lat30], (instregex "SFASR$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LFAS$")>; def : InstRW<[FXU, Lat2, GroupAlone], (instregex "SRNM(B|T)?$")>; + +// --------------------- Hexadecimal floating point ------------------------- // + 
+//===----------------------------------------------------------------------===// +// HFP: Move instructions +//===----------------------------------------------------------------------===// + +// Load and Test +def : InstRW<[FPU], (instregex "LT(D|E)R$")>; +def : InstRW<[FPU2, FPU2, Lat9, GroupAlone], (instregex "LTXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[FPU], (instregex "(LEDR|LRER)$")>; +def : InstRW<[FPU], (instregex "LEXR$")>; +def : InstRW<[FPU], (instregex "(LDXR|LRDR)$")>; + +// Load lengthened +def : InstRW<[LSU], (instregex "LDE$")>; +def : InstRW<[FXU], (instregex "LDER$")>; +def : InstRW<[FPU2, FPU2, LSU, Lat15, GroupAlone], (instregex "LX(D|E)$")>; +def : InstRW<[FPU2, FPU2, Lat10, GroupAlone], (instregex "LX(D|E)R$")>; + +// Convert from fixed +def : InstRW<[FXU, FPU, Lat9, GroupAlone], (instregex "CE(F|G)R$")>; +def : InstRW<[FXU, FPU, Lat9, GroupAlone], (instregex "CD(F|G)R$")>; +def : InstRW<[FXU, FPU2, FPU2, Lat11, GroupAlone], (instregex "CX(F|G)R$")>; + +// Convert to fixed +def : InstRW<[FXU, FPU, Lat12, GroupAlone], (instregex "CF(E|D)R$")>; +def : InstRW<[FXU, FPU, Lat12, GroupAlone], (instregex "CG(E|D)R$")>; +def : InstRW<[FXU, FPU, FPU, Lat20, GroupAlone], (instregex "C(F|G)XR$")>; + +// Convert BFP to HFP / HFP to BFP. +def : InstRW<[FPU], (instregex "THD(E)?R$")>; +def : InstRW<[FPU], (instregex "TB(E)?DR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load Complement / Negative / Positive +def : InstRW<[FPU], (instregex "L(C|N|P)DR$")>; +def : InstRW<[FPU], (instregex "L(C|N|P)ER$")>; +def : InstRW<[FPU2, FPU2, Lat9, GroupAlone], (instregex "L(C|N|P)XR$")>; + +// Halve +def : InstRW<[FPU], (instregex "H(E|D)R$")>; + +// Square root +def : InstRW<[FPU, LSU, Lat30], (instregex "SQ(E|D)$")>; +def : InstRW<[FPU, Lat30], (instregex "SQ(E|D)R$")>; +def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "SQXR$")>; + +// Load FP integer +def : InstRW<[FPU], (instregex "FIER$")>; +def : InstRW<[FPU], (instregex "FIDR$")>; +def : InstRW<[FPU2, FPU2, Lat15, GroupAlone], (instregex "FIXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : InstRW<[FPU, LSU, Lat12], (instregex "A(E|D|U|W)$")>; +def : InstRW<[FPU], (instregex "A(E|D|U|W)R$")>; +def : InstRW<[FPU2, FPU2, Lat20, GroupAlone], (instregex "AXR$")>; + +// Subtraction +def : InstRW<[FPU, LSU, Lat12], (instregex "S(E|D|U|W)$")>; +def : InstRW<[FPU], (instregex "S(E|D|U|W)R$")>; +def : InstRW<[FPU2, FPU2, Lat20, GroupAlone], (instregex "SXR$")>; + +// Multiply +def : InstRW<[FPU, LSU, Lat12], (instregex "M(D|DE|E|EE)$")>; +def : InstRW<[FPU], (instregex "M(D|DE|E|EE)R$")>; +def : InstRW<[FPU2, FPU2, LSU, Lat15, GroupAlone], (instregex "MXD$")>; +def : InstRW<[FPU2, FPU2, Lat10, GroupAlone], (instregex "MXDR$")>; +def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "MXR$")>; +def : InstRW<[FPU2, FPU2, LSU, Lat15, GroupAlone], (instregex "MY$")>; +def : InstRW<[FPU, FPU, LSU, Lat15, GroupAlone], (instregex "MY(H|L)$")>; +def : InstRW<[FPU2, FPU2, Lat10, GroupAlone], (instregex "MYR$")>; +def : 
InstRW<[FPU, Lat10, GroupAlone], (instregex "MY(H|L)R$")>; + +// Multiply and add / subtract +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)E$")>; +def : InstRW<[FPU, GroupAlone], (instregex "M(A|S)ER$")>; +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)D$")>; +def : InstRW<[FPU, GroupAlone], (instregex "M(A|S)DR$")>; +def : InstRW<[FPU2, FPU2, LSU, GroupAlone], (instregex "MAY$")>; +def : InstRW<[FPU2, FPU2, GroupAlone], (instregex "MAYR$")>; +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "MAY(H|L)$")>; +def : InstRW<[FPU, GroupAlone], (instregex "MAY(H|L)R$")>; + +// Division +def : InstRW<[FPU, LSU, Lat30], (instregex "D(E|D)$")>; +def : InstRW<[FPU, Lat30], (instregex "D(E|D)R$")>; +def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "DXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[FPU, LSU, Lat12], (instregex "C(E|D)$")>; +def : InstRW<[FPU], (instregex "C(E|D)R$")>; +def : InstRW<[FPU, FPU, Lat15], (instregex "CXR$")>; + + +// ------------------------ Decimal floating point -------------------------- // + +//===----------------------------------------------------------------------===// +// DFP: Move instructions +//===----------------------------------------------------------------------===// + +// Load and Test +def : InstRW<[DFU, Lat20], (instregex "LTDTR$")>; +def : InstRW<[DFU2, DFU2, Lat20, GroupAlone], (instregex "LTXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[DFU, Lat30], (instregex "LEDTR$")>; +def : InstRW<[DFU, DFU, Lat30], (instregex "LDXTR$")>; + +// Load lengthened +def : InstRW<[DFU, Lat20], (instregex "LDETR$")>; +def : InstRW<[DFU2, DFU2, Lat20, GroupAlone], (instregex "LXDTR$")>; + +// Convert from fixed / logical +def : InstRW<[FXU, DFU, Lat9, GroupAlone], (instregex "CDFTR$")>; +def : InstRW<[FXU, DFU, Lat30, GroupAlone], (instregex "CDGTR(A)?$")>; +def : InstRW<[FXU, DFU2, DFU2, GroupAlone], (instregex "CXFTR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat30, GroupAlone], (instregex "CXGTR(A)?$")>; +def : InstRW<[FXU, DFU, Lat11, GroupAlone], (instregex "CDL(F|G)TR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat11, GroupAlone], (instregex "CXLFTR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat6, GroupAlone], (instregex "CXLGTR$")>; + +// Convert to fixed / logical +def : InstRW<[FXU, DFU, Lat11, GroupAlone], (instregex "CFDTR(A)?$")>; +def : InstRW<[FXU, DFU, Lat30, GroupAlone], (instregex "CGDTR(A)?$")>; +def : InstRW<[FXU, DFU, DFU, Lat11, GroupAlone], (instregex "CFXTR$")>; +def : InstRW<[FXU, DFU, DFU, Lat30, GroupAlone], (instregex "CGXTR(A)?$")>; +def : InstRW<[FXU, DFU, Lat11, GroupAlone], (instregex "CL(F|G)DTR$")>; +def : InstRW<[FXU, DFU, DFU, Lat11, GroupAlone], (instregex "CL(F|G)XTR$")>; + +// Convert from / to signed / unsigned packed +def : InstRW<[FXU, DFU, Lat12, GroupAlone], (instregex "CD(S|U)TR$")>; +def : InstRW<[FXU, FXU, DFU2, DFU2, Lat20, GroupAlone], (instregex "CX(S|U)TR$")>; +def : InstRW<[FXU, DFU, Lat12, GroupAlone], (instregex "C(S|U)DTR$")>; +def : InstRW<[FXU, FXU, DFU2, DFU2, Lat20, GroupAlone], (instregex "C(S|U)XTR$")>; + +// Perform floating-point operation +def : InstRW<[FXU, Lat30], (instregex "PFPO$")>; + 
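The DFU and DFU2 writes used throughout this DFP section are backed by the single decimal floating-point unit the patch adds to the model (Z196_DFUnit as a ProcResource<1>, with WriteRes latencies of 2 and 3 in the hunk above); DFU2 simply books that one unit twice, which is how the extended-format (128-bit) operations are expressed. A minimal sketch of the same pattern with hypothetical names:

  // Hypothetical decimal FP unit: a single unit, booked twice for DFU2.
  def MyProc_DFUnit : ProcResource<1>;
  def MyDFU  : SchedWrite;
  def MyDFU2 : SchedWrite;
  def : WriteRes<MyDFU,  [MyProc_DFUnit]> { let Latency = 2; }
  def : WriteRes<MyDFU2, [MyProc_DFUnit, MyProc_DFUnit]> { let Latency = 3; }
  // A 64-bit DFP add uses the unit once; the 128-bit form uses it twice.
  def : InstRW<[MyDFU], (instregex "ADTR(A)?$")>;
  def : InstRW<[MyDFU2, MyDFU2], (instregex "AXTR(A)?$")>;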
+//===----------------------------------------------------------------------===// +// DFP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load FP integer +def : InstRW<[DFU, Lat20], (instregex "FIDTR$")>; +def : InstRW<[DFU2, DFU2, Lat20, GroupAlone], (instregex "FIXTR$")>; + +// Extract biased exponent +def : InstRW<[FXU, DFU, Lat15, GroupAlone], (instregex "EEDTR$")>; +def : InstRW<[FXU, DFU2, Lat15, GroupAlone], (instregex "EEXTR$")>; + +// Extract significance +def : InstRW<[FXU, DFU, Lat15, GroupAlone], (instregex "ESDTR$")>; +def : InstRW<[FXU, DFU, DFU, Lat20, GroupAlone], (instregex "ESXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : InstRW<[DFU, Lat30], (instregex "ADTR(A)?$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "AXTR(A)?$")>; + +// Subtraction +def : InstRW<[DFU, Lat30], (instregex "SDTR(A)?$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "SXTR(A)?$")>; + +// Multiply +def : InstRW<[DFU, Lat30], (instregex "MDTR(A)?$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "MXTR(A)?$")>; + +// Division +def : InstRW<[DFU, Lat30], (instregex "DDTR(A)?$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "DXTR(A)?$")>; + +// Quantize +def : InstRW<[DFU, Lat30], (instregex "QADTR$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "QAXTR$")>; + +// Reround +def : InstRW<[FXU, DFU, Lat30, GroupAlone], (instregex "RRDTR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat30, GroupAlone], (instregex "RRXTR$")>; + +// Shift significand left/right +def : InstRW<[LSU, DFU, Lat11, GroupAlone], (instregex "S(L|R)DT$")>; +def : InstRW<[LSU, DFU2, DFU2, Lat15, GroupAlone], (instregex "S(L|R)XT$")>; + +// Insert biased exponent +def : InstRW<[FXU, DFU, Lat11, GroupAlone], (instregex "IEDTR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat15, GroupAlone], (instregex "IEXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[DFU, Lat11], (instregex "(K|C)DTR$")>; +def : InstRW<[DFU, DFU, Lat15], (instregex "(K|C)XTR$")>; + +// Compare biased exponent +def : InstRW<[DFU, Lat8], (instregex "CEDTR$")>; +def : InstRW<[DFU2, Lat9], (instregex "CEXTR$")>; + +// Test Data Class/Group +def : InstRW<[LSU, DFU, Lat15], (instregex "TD(C|G)(E|D)T$")>; +def : InstRW<[LSU, DFU2, Lat15], (instregex "TD(C|G)XT$")>; + + +// -------------------------------- System ---------------------------------- // + +//===----------------------------------------------------------------------===// +// System: Program-Status Word Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "EPSW$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LPSW(E)?$")>; +def : InstRW<[FXU, Lat3, GroupAlone], (instregex "IPK$")>; +def : InstRW<[LSU, EndGroup], (instregex "SPKA$")>; +def : InstRW<[LSU, EndGroup], (instregex "SSM$")>; +def : InstRW<[FXU, LSU, GroupAlone], (instregex "ST(N|O)SM$")>; +def : InstRW<[FXU, Lat3], (instregex "IAC$")>; +def : InstRW<[LSU, EndGroup], (instregex "SAC(F)?$")>; + +//===----------------------------------------------------------------------===// +// System: Control Register Instructions 
+//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "LCTL(G)?$")>; +def : InstRW<[FXU, LSU, LSU, LSU, LSU, Lat10, GroupAlone], + (instregex "STCT(L|G)$")>; +def : InstRW<[LSU], (instregex "E(P|S)A(I)?R$")>; +def : InstRW<[FXU, Lat30], (instregex "SSA(I)?R$")>; +def : InstRW<[FXU, Lat30], (instregex "ESEA$")>; + +//===----------------------------------------------------------------------===// +// System: Prefix-Register Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "SPX$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STPX$")>; + +//===----------------------------------------------------------------------===// +// System: Storage-Key and Real Memory Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "ISKE$")>; +def : InstRW<[FXU, Lat30], (instregex "IVSK$")>; +def : InstRW<[FXU, Lat30], (instregex "SSKE(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "RRB(E|M)$")>; +def : InstRW<[FXU, Lat30], (instregex "PFMF$")>; +def : InstRW<[FXU, Lat30], (instregex "TB$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "PGIN$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "PGOUT$")>; + +//===----------------------------------------------------------------------===// +// System: Dynamic-Address-Translation Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "IPTE(Opt)?(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "IDTE(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "PTLB$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "CSP(G)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LPTEA$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LRA(Y|G)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STRAG$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LURA(G)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STUR(A|G)$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "TPROT$")>; + +//===----------------------------------------------------------------------===// +// System: Memory-move Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU, Lat8, GroupAlone], (instregex "MVC(K|P|S)$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCSK$")>; +def : InstRW<[LSU, Lat6, GroupAlone], (instregex "MVCDK$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "MVCOS$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVPG$")>; + +//===----------------------------------------------------------------------===// +// System: Address-Space Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "LASP$")>; +def : InstRW<[LSU, GroupAlone], (instregex "PALB$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "PC$")>; +def : InstRW<[FXU, Lat30], (instregex "PR$")>; +def : InstRW<[FXU, Lat30], (instregex "PT(I)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "RP$")>; +def : InstRW<[FXU, Lat30], (instregex "BS(G|A)$")>; +def : InstRW<[FXU, Lat20], (instregex "TAR$")>; + +//===----------------------------------------------------------------------===// +// System: Linkage-Stack Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30, EndGroup], (instregex 
"BAKR$")>; +def : InstRW<[FXU, Lat30], (instregex "EREG(G)?$")>; +def : InstRW<[FXU, Lat30], (instregex "(E|M)STA$")>; + +//===----------------------------------------------------------------------===// +// System: Time-Related Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "PTFF$")>; +def : InstRW<[FXU, LSU, Lat20], (instregex "SCK$")>; +def : InstRW<[FXU, Lat30], (instregex "SCKPF$")>; +def : InstRW<[FXU, LSU, Lat20], (instregex "SCKC$")>; +def : InstRW<[FXU, LSU, Lat20], (instregex "SPT$")>; +def : InstRW<[FXU, LSU, Lat15], (instregex "STCK$")>; +def : InstRW<[FXU, LSU, Lat12], (instregex "STCKF$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STCKE$")>; +def : InstRW<[FXU, LSU, Lat9], (instregex "STCKC$")>; +def : InstRW<[FXU, LSU, Lat8], (instregex "STPT$")>; + +//===----------------------------------------------------------------------===// +// System: CPU-Related Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "STAP$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STIDP$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STSI$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STFL(E)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "ECAG$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "ECTG$")>; +def : InstRW<[FXU, Lat30], (instregex "PTF$")>; +def : InstRW<[FXU, Lat30], (instregex "PCKMO$")>; + +//===----------------------------------------------------------------------===// +// System: Miscellaneous Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "SVC$")>; +def : InstRW<[FXU, GroupAlone], (instregex "MC$")>; +def : InstRW<[FXU, Lat30], (instregex "DIAG$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "TRAC(E|G)$")>; +def : InstRW<[FXU, Lat30], (instregex "TRAP(2|4)$")>; +def : InstRW<[FXU, Lat30], (instregex "SIGP$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "SIGA$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "SIE$")>; + +//===----------------------------------------------------------------------===// +// System: CPU-Measurement Facility Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU], (instregex "LPP$")>; +def : InstRW<[FXU, Lat30], (instregex "ECPGA$")>; +def : InstRW<[FXU, Lat30], (instregex "E(C|P)CTR$")>; +def : InstRW<[FXU, Lat30], (instregex "LCCTL$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "L(P|S)CTL$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "Q(S|CTR)I$")>; +def : InstRW<[FXU, Lat30], (instregex "S(C|P)CTR$")>; + +//===----------------------------------------------------------------------===// +// System: I/O Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "(C|H|R|X)SCH$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "(M|S|ST|T)SCH$")>; +def : InstRW<[FXU, Lat30], (instregex "RCHP$")>; +def : InstRW<[FXU, Lat30], (instregex "SCHM$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STC(PS|RW)$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "TPI$")>; +def : InstRW<[FXU, Lat30], (instregex "SAL$")>; + } diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td b/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td index 8ab6c82..a0f2115 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td +++ 
b/contrib/llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td @@ -59,6 +59,7 @@ def : WriteRes<Lat30, []> { let Latency = 30; let NumMicroOps = 0;} def ZEC12_FXUnit : ProcResource<2>; def ZEC12_LSUnit : ProcResource<2>; def ZEC12_FPUnit : ProcResource<1>; +def ZEC12_DFUnit : ProcResource<1>; def ZEC12_VBUnit : ProcResource<1>; // Subtarget specific definitions of scheduling resources. @@ -67,6 +68,8 @@ def : WriteRes<LSU, [ZEC12_LSUnit]> { let Latency = 4; } def : WriteRes<LSU_lat1, [ZEC12_LSUnit]> { let Latency = 1; } def : WriteRes<FPU, [ZEC12_FPUnit]> { let Latency = 8; } def : WriteRes<FPU2, [ZEC12_FPUnit, ZEC12_FPUnit]> { let Latency = 9; } +def : WriteRes<DFU, [ZEC12_DFUnit]> { let Latency = 2; } +def : WriteRes<DFU2, [ZEC12_DFUnit, ZEC12_DFUnit]> { let Latency = 3; } def : WriteRes<VBU, [ZEC12_VBUnit]>; // Virtual Branching Unit // -------------------------- INSTRUCTIONS ---------------------------------- // @@ -155,6 +158,7 @@ def : InstRW<[FXU, LSU, Lat5], (instregex "MVI(Y)?$")>; // Move character def : InstRW<[LSU, LSU, LSU, FXU, Lat8, GroupAlone], (instregex "MVC$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCL(E|U)?$")>; // Pseudo -> reg move def : InstRW<[FXU], (instregex "COPY(_TO_REGCLASS)?$")>; @@ -236,6 +240,7 @@ def : InstRW<[FXU, LSU, Lat5], (instregex "LLG(F|T)?AT$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "STC(H|Y|Mux)?$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "STH(H|Y|RL|Mux)?$")>; +def : InstRW<[FXU, LSU, Lat5], (instregex "STCM(H|Y)?$")>; //===----------------------------------------------------------------------===// // Multi-register moves @@ -245,6 +250,9 @@ def : InstRW<[FXU, LSU, Lat5], (instregex "STH(H|Y|RL|Mux)?$")>; def : InstRW<[LSU, LSU, LSU, LSU, LSU, Lat10, GroupAlone], (instregex "LM(H|Y|G)?$")>; +// Load multiple disjoint +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "LMD$")>; + // Store multiple (estimated average of 3 ops) def : InstRW<[LSU, LSU, FXU, FXU, FXU, Lat10, GroupAlone], (instregex "STM(H|Y|G)?$")>; @@ -256,6 +264,7 @@ def : InstRW<[LSU, LSU, FXU, FXU, FXU, Lat10, GroupAlone], def : InstRW<[FXU], (instregex "LRV(G)?R$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "LRV(G|H)?$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "STRV(G|H)?$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVCIN$")>; //===----------------------------------------------------------------------===// // Load address instructions @@ -295,7 +304,7 @@ def : InstRW<[FXU], (instregex "IILL(64)?$")>; // Addition //===----------------------------------------------------------------------===// -def : InstRW<[FXU, LSU, Lat5], (instregex "A(Y|SI)?$")>; +def : InstRW<[FXU, LSU, Lat5], (instregex "A(L)?(Y|SI)?$")>; def : InstRW<[FXU, LSU, Lat6], (instregex "AH(Y)?$")>; def : InstRW<[FXU], (instregex "AIH$")>; def : InstRW<[FXU], (instregex "AFI(Mux)?$")>; @@ -304,15 +313,17 @@ def : InstRW<[FXU], (instregex "AGHI(K)?$")>; def : InstRW<[FXU], (instregex "AGR(K)?$")>; def : InstRW<[FXU], (instregex "AHI(K)?$")>; def : InstRW<[FXU], (instregex "AHIMux(K)?$")>; -def : InstRW<[FXU, LSU, Lat5], (instregex "AL(Y)?$")>; def : InstRW<[FXU], (instregex "AL(FI|HSIK)$")>; -def : InstRW<[FXU, LSU, Lat5], (instregex "ALG(F)?$")>; +def : InstRW<[FXU, LSU, Lat5], (instregex "ALGF$")>; def : InstRW<[FXU], (instregex "ALGHSIK$")>; def : InstRW<[FXU], (instregex "ALGF(I|R)$")>; def : InstRW<[FXU], (instregex "ALGR(K)?$")>; def : InstRW<[FXU], (instregex "ALR(K)?$")>; def : InstRW<[FXU], (instregex "AR(K)?$")>; -def : InstRW<[FXU, LSU, Lat5], (instregex "AG(SI)?$")>; 
+def : InstRW<[FXU], (instregex "A(L)?HHHR$")>; +def : InstRW<[FXU, Lat2], (instregex "A(L)?HHLR$")>; +def : InstRW<[FXU], (instregex "ALSIH(N)?$")>; +def : InstRW<[FXU, LSU, Lat5], (instregex "A(L)?G(SI)?$")>; // Logical addition with carry def : InstRW<[FXU, LSU, Lat7, GroupAlone], (instregex "ALC(G)?$")>; @@ -335,6 +346,8 @@ def : InstRW<[FXU], (instregex "SLGF(I|R)$")>; def : InstRW<[FXU], (instregex "SLGR(K)?$")>; def : InstRW<[FXU], (instregex "SLR(K)?$")>; def : InstRW<[FXU], (instregex "SR(K)?$")>; +def : InstRW<[FXU], (instregex "S(L)?HHHR$")>; +def : InstRW<[FXU, Lat2], (instregex "S(L)?HHLR$")>; // Subtraction with borrow def : InstRW<[FXU, LSU, Lat7, GroupAlone], (instregex "SLB(G)?$")>; @@ -400,16 +413,22 @@ def : InstRW<[FXU, Lat6], (instregex "MS(R|FI)$")>; def : InstRW<[FXU, LSU, Lat12], (instregex "MSG$")>; def : InstRW<[FXU, Lat8], (instregex "MSGR$")>; def : InstRW<[FXU, Lat6], (instregex "MSGF(I|R)$")>; -def : InstRW<[FXU, LSU, Lat15, GroupAlone], (instregex "MLG$")>; -def : InstRW<[FXU, Lat9, GroupAlone], (instregex "MLGR$")>; +def : InstRW<[FXU, FXU, LSU, Lat15, GroupAlone], (instregex "MLG$")>; +def : InstRW<[FXU, FXU, Lat9, GroupAlone], (instregex "MLGR$")>; def : InstRW<[FXU, Lat5], (instregex "MGHI$")>; def : InstRW<[FXU, Lat5], (instregex "MHI$")>; def : InstRW<[FXU, LSU, Lat9], (instregex "MH(Y)?$")>; +def : InstRW<[FXU, FXU, Lat7, GroupAlone], (instregex "M(L)?R$")>; +def : InstRW<[FXU, FXU, LSU, Lat7, GroupAlone], (instregex "M(FY|L)?$")>; //===----------------------------------------------------------------------===// // Division and remainder //===----------------------------------------------------------------------===// +def : InstRW<[FPU2, FPU2, FXU, FXU, FXU, FXU, FXU, Lat30, GroupAlone], + (instregex "DR$")>; +def : InstRW<[FPU2, FPU2, LSU, FXU, FXU, FXU, FXU, Lat30, GroupAlone], + (instregex "D$")>; def : InstRW<[FPU2, FPU2, FXU, FXU, FXU, FXU, Lat30, GroupAlone], (instregex "DSG(F)?R$")>; def : InstRW<[FPU2, FPU2, LSU, FXU, FXU, FXU, Lat30, GroupAlone], @@ -426,7 +445,9 @@ def : InstRW<[FPU2, FPU2, LSU, FXU, FXU, FXU, FXU, Lat30, GroupAlone], def : InstRW<[FXU], (instregex "SLL(G|K)?$")>; def : InstRW<[FXU], (instregex "SRL(G|K)?$")>; def : InstRW<[FXU], (instregex "SRA(G|K)?$")>; -def : InstRW<[FXU], (instregex "SLA(K)?$")>; +def : InstRW<[FXU], (instregex "SLA(G|K)?$")>; +def : InstRW<[FXU, FXU, FXU, FXU, LSU, Lat8, GroupAlone], + (instregex "S(L|R)D(A|L)$")>; // Rotate def : InstRW<[FXU, LSU, Lat6], (instregex "RLL(G)?$")>; @@ -463,6 +484,8 @@ def : InstRW<[FXU], (instregex "CLIH$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "CLI(Y)?$")>; def : InstRW<[FXU], (instregex "CLR$")>; def : InstRW<[FXU, LSU, Lat5], (instregex "CLRL$")>; +def : InstRW<[FXU], (instregex "C(L)?HHR$")>; +def : InstRW<[FXU, Lat2], (instregex "C(L)?HLR$")>; // Compare halfword def : InstRW<[FXU, LSU, Lat6], (instregex "CH(Y|RL)?$")>; @@ -475,7 +498,7 @@ def : InstRW<[FXU, Lat2], (instregex "CGFR$")>; // Compare logical character def : InstRW<[FXU, LSU, LSU, Lat9, GroupAlone], (instregex "CLC$")>; - +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CLCL(E|U)?$")>; def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CLST$")>; // Test under mask @@ -486,6 +509,9 @@ def : InstRW<[FXU], (instregex "TMHL(64)?$")>; def : InstRW<[FXU], (instregex "TMLH(64)?$")>; def : InstRW<[FXU], (instregex "TMLL(64)?$")>; +// Compare logical characters under mask +def : InstRW<[FXU, LSU, Lat5], (instregex "CLM(H|Y)?$")>; + 
//===----------------------------------------------------------------------===// // Prefetch and execution hint //===----------------------------------------------------------------------===// @@ -519,7 +545,7 @@ def : InstRW<[FXU, FXU, FXU, FXU, FXU, FXU, LSU, LSU, Lat12, GroupAlone], (instregex "CDSG$")>; // Compare and swap and store -def : InstRW<[FXU, Lat30, GroupAlone], (instregex "CSST$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "CSST$")>; // Perform locked operation def : InstRW<[LSU, Lat30, GroupAlone], (instregex "PLO$")>; @@ -532,6 +558,50 @@ def : InstRW<[FXU, FXU, LSU, LSU, Lat6, GroupAlone], (instregex "STPQ$")>; def : InstRW<[LSU, LSU, Lat5, GroupAlone], (instregex "LPD(G)?$")>; //===----------------------------------------------------------------------===// +// Translate and convert +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "TR$")>; +def : InstRW<[FXU, FXU, FXU, LSU, LSU, Lat30, GroupAlone], (instregex "TRT$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "TRTR$")>; +def : InstRW<[FXU, Lat30], (instregex "TR(TR)?(T)?(E|EOpt)?$")>; +def : InstRW<[LSU, Lat30], (instregex "TR(T|O)(T|O)(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "CU(12|14|21|24|41|42)(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "(CUUTF|CUTFU)(Opt)?$")>; + +//===----------------------------------------------------------------------===// +// Message-security assist +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "KM(C|F|O|CTR)?$")>; +def : InstRW<[FXU, Lat30], (instregex "(KIMD|KLMD|KMAC|PCC)$")>; + +//===----------------------------------------------------------------------===// +// Decimal arithmetic +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, DFU2, LSU, LSU, Lat30, GroupAlone], (instregex "CVBG$")>; +def : InstRW<[FXU, DFU, LSU, Lat30, GroupAlone], (instregex "CVB(Y)?$")>; +def : InstRW<[FXU, FXU, FXU, DFU2, DFU2, LSU, Lat30, GroupAlone], + (instregex "CVDG$")>; +def : InstRW<[FXU, FXU, DFU, LSU, Lat30, GroupAlone], (instregex "CVD(Y)?$")>; +def : InstRW<[LSU, Lat10, GroupAlone], (instregex "MVO$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MV(N|Z)$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "(PACK|PKA|PKU)$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "UNPK$")>; +def : InstRW<[LSU, Lat12, GroupAlone], (instregex "UNPK(A|U)$")>; + +def : InstRW<[FXU, DFU2, DFU2, LSU, LSU, Lat15, GroupAlone], + (instregex "(A|S|ZA)P$")>; +def : InstRW<[FXU, DFU2, DFU2, LSU, LSU, Lat30, GroupAlone], + (instregex "(M|D)P$")>; +def : InstRW<[FXU, FXU, DFU2, DFU2, LSU, LSU, LSU, Lat15, GroupAlone], + (instregex "SRP$")>; +def : InstRW<[DFU2, DFU2, LSU, LSU, Lat11, GroupAlone], (instregex "CP$")>; +def : InstRW<[DFU2, LSU, LSU, Lat5, GroupAlone], (instregex "TP$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "ED(MK)?$")>; + +//===----------------------------------------------------------------------===// // Access registers //===----------------------------------------------------------------------===// @@ -598,40 +668,29 @@ def : InstRW<[FXU], (instregex "PPA$")>; //===----------------------------------------------------------------------===// // Find leftmost one -def : InstRW<[FXU, Lat7, GroupAlone], (instregex "FLOGR$")>; +def : InstRW<[FXU, FXU, Lat7, GroupAlone], (instregex "FLOGR$")>; // Population count def : InstRW<[FXU, Lat3], (instregex 
"POPCNT$")>; // Extend -def : InstRW<[FXU], (instregex "AEXT128_64$")>; -def : InstRW<[FXU], (instregex "ZEXT128_(32|64)$")>; +def : InstRW<[FXU], (instregex "AEXT128$")>; +def : InstRW<[FXU], (instregex "ZEXT128$")>; // String instructions def : InstRW<[FXU, LSU, Lat30], (instregex "SRST$")>; +def : InstRW<[FXU, Lat30], (instregex "SRSTU$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "CUSE$")>; -// Move with key -def : InstRW<[LSU, Lat8, GroupAlone], (instregex "MVCK$")>; - -// Extract CPU Time -def : InstRW<[FXU, Lat5, LSU], (instregex "ECTG$")>; +// Various complex instructions +def : InstRW<[LSU, Lat30], (instregex "CFC$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "UPT$")>; +def : InstRW<[LSU, Lat30], (instregex "CKSM$")>; +def : InstRW<[FXU, Lat30], (instregex "CMPSC$")>; // Execute def : InstRW<[LSU, GroupAlone], (instregex "EX(RL)?$")>; -// Program return -def : InstRW<[FXU, Lat30], (instregex "PR$")>; - -// Inline assembly -def : InstRW<[FXU, LSU, LSU, Lat9, GroupAlone], (instregex "STCK(F)?$")>; -def : InstRW<[LSU, LSU, LSU, LSU, FXU, FXU, Lat20, GroupAlone], - (instregex "STCKE$")>; -def : InstRW<[FXU, LSU, Lat5], (instregex "STFLE$")>; -def : InstRW<[FXU, Lat30], (instregex "SVC$")>; - -// Store real address -def : InstRW<[FXU, LSU, Lat5], (instregex "STRAG$")>; - //===----------------------------------------------------------------------===// // .insn directive instructions //===----------------------------------------------------------------------===// @@ -768,9 +827,9 @@ def : InstRW<[FPU2, FPU2, Lat10, GroupAlone], (instregex "MXDBR$")>; def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "MXBR$")>; // Multiply and add / subtract -def : InstRW<[FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>; +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)EB$")>; def : InstRW<[FPU, GroupAlone], (instregex "M(A|S)EBR$")>; -def : InstRW<[FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>; +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)DB$")>; def : InstRW<[FPU, GroupAlone], (instregex "M(A|S)DBR$")>; // Division @@ -778,14 +837,17 @@ def : InstRW<[FPU, LSU, Lat30], (instregex "D(E|D)B$")>; def : InstRW<[FPU, Lat30], (instregex "D(E|D)BR$")>; def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "DXBR$")>; +// Divide to integer +def : InstRW<[FPU, Lat30], (instregex "DI(E|D)BR$")>; + //===----------------------------------------------------------------------===// // FP: Comparisons //===----------------------------------------------------------------------===// // Compare -def : InstRW<[FPU, LSU, Lat12], (instregex "C(E|D)B$")>; -def : InstRW<[FPU], (instregex "C(E|D)BR$")>; -def : InstRW<[FPU, FPU, Lat30], (instregex "CXBR$")>; +def : InstRW<[FPU, LSU, Lat12], (instregex "(K|C)(E|D)B$")>; +def : InstRW<[FPU], (instregex "(K|C)(E|D)BR$")>; +def : InstRW<[FPU, FPU, Lat30], (instregex "(K|C)XBR$")>; // Test Data Class def : InstRW<[FPU, LSU, Lat15], (instregex "TC(E|D)B$")>; @@ -798,10 +860,403 @@ def : InstRW<[FPU2, FPU2, LSU, Lat15, GroupAlone], (instregex "TCXB$")>; def : InstRW<[FXU, LSU, Lat4, GroupAlone], (instregex "EFPC$")>; def : InstRW<[LSU, Lat3, GroupAlone], (instregex "SFPC$")>; def : InstRW<[LSU, LSU, Lat6, GroupAlone], (instregex "LFPC$")>; -def : InstRW<[LSU, Lat3, GroupAlone], (instregex "STFPC$")>; -def : InstRW<[FXU, Lat30, GroupAlone], (instregex "SFASR$")>; -def : InstRW<[FXU, LSU, Lat30, GroupAlone], (instregex "LFAS$")>; -def : InstRW<[FXU, Lat2, GroupAlone], (instregex "SRNM(B|T)?$")>; +def : 
InstRW<[FXU, LSU, Lat3, GroupAlone], (instregex "STFPC$")>; +def : InstRW<[FXU, Lat30], (instregex "SFASR$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LFAS$")>; +def : InstRW<[FXU, GroupAlone], (instregex "SRNM(B|T)?$")>; + + +// --------------------- Hexadecimal floating point ------------------------- // + +//===----------------------------------------------------------------------===// +// HFP: Move instructions +//===----------------------------------------------------------------------===// + +// Load and Test +def : InstRW<[FPU], (instregex "LT(D|E)R$")>; +def : InstRW<[FPU2, FPU2, Lat9, GroupAlone], (instregex "LTXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[FPU], (instregex "(LEDR|LRER)$")>; +def : InstRW<[FPU], (instregex "LEXR$")>; +def : InstRW<[FPU], (instregex "(LDXR|LRDR)$")>; + +// Load lengthened +def : InstRW<[LSU], (instregex "LDE$")>; +def : InstRW<[FXU], (instregex "LDER$")>; +def : InstRW<[FPU2, FPU2, LSU, Lat15, GroupAlone], (instregex "LX(D|E)$")>; +def : InstRW<[FPU2, FPU2, Lat10, GroupAlone], (instregex "LX(D|E)R$")>; + +// Convert from fixed +def : InstRW<[FXU, FPU, Lat9, GroupAlone], (instregex "CE(F|G)R$")>; +def : InstRW<[FXU, FPU, Lat9, GroupAlone], (instregex "CD(F|G)R$")>; +def : InstRW<[FXU, FPU2, FPU2, Lat11, GroupAlone], (instregex "CX(F|G)R$")>; + +// Convert to fixed +def : InstRW<[FXU, FPU, Lat12, GroupAlone], (instregex "CF(E|D)R$")>; +def : InstRW<[FXU, FPU, Lat12, GroupAlone], (instregex "CG(E|D)R$")>; +def : InstRW<[FXU, FPU, FPU, Lat20, GroupAlone], (instregex "C(F|G)XR$")>; + +// Convert BFP to HFP / HFP to BFP. 
+def : InstRW<[FPU], (instregex "THD(E)?R$")>; +def : InstRW<[FPU], (instregex "TB(E)?DR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load Complement / Negative / Positive +def : InstRW<[FPU], (instregex "L(C|N|P)DR$")>; +def : InstRW<[FPU], (instregex "L(C|N|P)ER$")>; +def : InstRW<[FPU2, FPU2, Lat9, GroupAlone], (instregex "L(C|N|P)XR$")>; + +// Halve +def : InstRW<[FPU], (instregex "H(E|D)R$")>; + +// Square root +def : InstRW<[FPU, LSU, Lat30], (instregex "SQ(E|D)$")>; +def : InstRW<[FPU, Lat30], (instregex "SQ(E|D)R$")>; +def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "SQXR$")>; + +// Load FP integer +def : InstRW<[FPU], (instregex "FIER$")>; +def : InstRW<[FPU], (instregex "FIDR$")>; +def : InstRW<[FPU2, FPU2, Lat15, GroupAlone], (instregex "FIXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : InstRW<[FPU, LSU, Lat12], (instregex "A(E|D|U|W)$")>; +def : InstRW<[FPU], (instregex "A(E|D|U|W)R$")>; +def : InstRW<[FPU2, FPU2, Lat20, GroupAlone], (instregex "AXR$")>; + +// Subtraction +def : InstRW<[FPU, LSU, Lat12], (instregex "S(E|D|U|W)$")>; +def : InstRW<[FPU], (instregex "S(E|D|U|W)R$")>; +def : InstRW<[FPU2, FPU2, Lat20, GroupAlone], (instregex "SXR$")>; + +// Multiply +def : InstRW<[FPU, LSU, Lat12], (instregex "M(D|DE|E|EE)$")>; +def : InstRW<[FPU], (instregex "M(D|DE|E|EE)R$")>; +def : InstRW<[FPU2, FPU2, LSU, Lat15, GroupAlone], (instregex "MXD$")>; +def : InstRW<[FPU2, FPU2, Lat10, GroupAlone], (instregex "MXDR$")>; +def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "MXR$")>; +def : InstRW<[FPU2, FPU2, LSU, Lat15, GroupAlone], (instregex "MY$")>; +def : InstRW<[FPU, FPU, LSU, Lat15, GroupAlone], (instregex "MY(H|L)$")>; +def : InstRW<[FPU2, FPU2, Lat10, GroupAlone], (instregex "MYR$")>; +def : InstRW<[FPU, Lat10, GroupAlone], (instregex "MY(H|L)R$")>; + +// Multiply and add / subtract +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)E$")>; +def : InstRW<[FPU, GroupAlone], (instregex "M(A|S)ER$")>; +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "M(A|S)D$")>; +def : InstRW<[FPU, GroupAlone], (instregex "M(A|S)DR$")>; +def : InstRW<[FPU2, FPU2, LSU, GroupAlone], (instregex "MAY$")>; +def : InstRW<[FPU2, FPU2, GroupAlone], (instregex "MAYR$")>; +def : InstRW<[FPU, FPU, LSU, Lat12, GroupAlone], (instregex "MAY(H|L)$")>; +def : InstRW<[FPU, GroupAlone], (instregex "MAY(H|L)R$")>; + +// Division +def : InstRW<[FPU, LSU, Lat30], (instregex "D(E|D)$")>; +def : InstRW<[FPU, Lat30], (instregex "D(E|D)R$")>; +def : InstRW<[FPU2, FPU2, Lat30, GroupAlone], (instregex "DXR$")>; + +//===----------------------------------------------------------------------===// +// HFP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[FPU, LSU, Lat12], (instregex "C(E|D)$")>; +def : InstRW<[FPU], (instregex "C(E|D)R$")>; +def : InstRW<[FPU, FPU, Lat15], (instregex "CXR$")>; + + +// ------------------------ Decimal floating point -------------------------- // + +//===----------------------------------------------------------------------===// +// DFP: Move instructions +//===----------------------------------------------------------------------===// + +// Load and 
Test +def : InstRW<[DFU, Lat20], (instregex "LTDTR$")>; +def : InstRW<[DFU2, DFU2, Lat20, GroupAlone], (instregex "LTXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Conversion instructions +//===----------------------------------------------------------------------===// + +// Load rounded +def : InstRW<[DFU, Lat30], (instregex "LEDTR$")>; +def : InstRW<[DFU, DFU, Lat30], (instregex "LDXTR$")>; + +// Load lengthened +def : InstRW<[DFU, Lat20], (instregex "LDETR$")>; +def : InstRW<[DFU2, DFU2, Lat20, GroupAlone], (instregex "LXDTR$")>; + +// Convert from fixed / logical +def : InstRW<[FXU, DFU, Lat9, GroupAlone], (instregex "CDFTR$")>; +def : InstRW<[FXU, DFU, Lat30, GroupAlone], (instregex "CDGTR(A)?$")>; +def : InstRW<[FXU, DFU2, DFU2, GroupAlone], (instregex "CXFTR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat30, GroupAlone], (instregex "CXGTR(A)?$")>; +def : InstRW<[FXU, DFU, Lat11, GroupAlone], (instregex "CDL(F|G)TR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat11, GroupAlone], (instregex "CXLFTR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat6, GroupAlone], (instregex "CXLGTR$")>; + +// Convert to fixed / logical +def : InstRW<[FXU, DFU, Lat11, GroupAlone], (instregex "CFDTR(A)?$")>; +def : InstRW<[FXU, DFU, Lat30, GroupAlone], (instregex "CGDTR(A)?$")>; +def : InstRW<[FXU, DFU, DFU, Lat11, GroupAlone], (instregex "CFXTR$")>; +def : InstRW<[FXU, DFU, DFU, Lat30, GroupAlone], (instregex "CGXTR(A)?$")>; +def : InstRW<[FXU, DFU, Lat11, GroupAlone], (instregex "CL(F|G)DTR$")>; +def : InstRW<[FXU, DFU, DFU, Lat11, GroupAlone], (instregex "CL(F|G)XTR$")>; + +// Convert from / to signed / unsigned packed +def : InstRW<[FXU, DFU, Lat12, GroupAlone], (instregex "CD(S|U)TR$")>; +def : InstRW<[FXU, FXU, DFU2, DFU2, Lat20, GroupAlone], (instregex "CX(S|U)TR$")>; +def : InstRW<[FXU, DFU, Lat12, GroupAlone], (instregex "C(S|U)DTR$")>; +def : InstRW<[FXU, FXU, DFU2, DFU2, Lat20, GroupAlone], (instregex "C(S|U)XTR$")>; + +// Convert from / to zoned +def : InstRW<[LSU, DFU2, Lat7, GroupAlone], (instregex "CDZT$")>; +def : InstRW<[LSU, LSU, DFU2, DFU2, Lat10, GroupAlone], (instregex "CXZT$")>; +def : InstRW<[FXU, LSU, DFU, DFU, Lat11, GroupAlone], (instregex "CZDT$")>; +def : InstRW<[FXU, LSU, DFU, DFU, Lat15, GroupAlone], (instregex "CZXT$")>; + +// Perform floating-point operation +def : InstRW<[FXU, Lat30], (instregex "PFPO$")>; + +//===----------------------------------------------------------------------===// +// DFP: Unary arithmetic +//===----------------------------------------------------------------------===// + +// Load FP integer +def : InstRW<[DFU, Lat20], (instregex "FIDTR$")>; +def : InstRW<[DFU2, DFU2, Lat20, GroupAlone], (instregex "FIXTR$")>; + +// Extract biased exponent +def : InstRW<[FXU, DFU, Lat15, GroupAlone], (instregex "EEDTR$")>; +def : InstRW<[FXU, DFU2, Lat15, GroupAlone], (instregex "EEXTR$")>; + +// Extract significance +def : InstRW<[FXU, DFU, Lat15, GroupAlone], (instregex "ESDTR$")>; +def : InstRW<[FXU, DFU, DFU, Lat20, GroupAlone], (instregex "ESXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Binary arithmetic +//===----------------------------------------------------------------------===// + +// Addition +def : InstRW<[DFU, Lat30], (instregex "ADTR(A)?$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "AXTR(A)?$")>; + +// Subtraction +def : InstRW<[DFU, Lat30], (instregex "SDTR(A)?$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "SXTR(A)?$")>; + +// 
Multiply +def : InstRW<[DFU, Lat30], (instregex "MDTR(A)?$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "MXTR(A)?$")>; + +// Division +def : InstRW<[DFU, Lat30], (instregex "DDTR(A)?$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "DXTR(A)?$")>; + +// Quantize +def : InstRW<[DFU, Lat30], (instregex "QADTR$")>; +def : InstRW<[DFU2, DFU2, Lat30, GroupAlone], (instregex "QAXTR$")>; + +// Reround +def : InstRW<[FXU, DFU, Lat30, GroupAlone], (instregex "RRDTR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat30, GroupAlone], (instregex "RRXTR$")>; + +// Shift significand left/right +def : InstRW<[LSU, DFU, Lat11, GroupAlone], (instregex "S(L|R)DT$")>; +def : InstRW<[LSU, DFU2, DFU2, Lat15, GroupAlone], (instregex "S(L|R)XT$")>; + +// Insert biased exponent +def : InstRW<[FXU, DFU, Lat11, GroupAlone], (instregex "IEDTR$")>; +def : InstRW<[FXU, DFU2, DFU2, Lat15, GroupAlone], (instregex "IEXTR$")>; + +//===----------------------------------------------------------------------===// +// DFP: Comparisons +//===----------------------------------------------------------------------===// + +// Compare +def : InstRW<[DFU, Lat11], (instregex "(K|C)DTR$")>; +def : InstRW<[DFU, DFU, Lat15], (instregex "(K|C)XTR$")>; + +// Compare biased exponent +def : InstRW<[DFU, Lat8], (instregex "CEDTR$")>; +def : InstRW<[DFU, DFU, Lat9], (instregex "CEXTR$")>; + +// Test Data Class/Group +def : InstRW<[LSU, DFU, Lat15], (instregex "TD(C|G)(E|D)T$")>; +def : InstRW<[LSU, DFU2, Lat15], (instregex "TD(C|G)XT$")>; + + +// -------------------------------- System ---------------------------------- // + +//===----------------------------------------------------------------------===// +// System: Program-Status Word Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "EPSW$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LPSW(E)?$")>; +def : InstRW<[FXU, Lat3, GroupAlone], (instregex "IPK$")>; +def : InstRW<[LSU, EndGroup], (instregex "SPKA$")>; +def : InstRW<[LSU, EndGroup], (instregex "SSM$")>; +def : InstRW<[FXU, LSU, GroupAlone], (instregex "ST(N|O)SM$")>; +def : InstRW<[FXU, Lat3], (instregex "IAC$")>; +def : InstRW<[LSU, EndGroup], (instregex "SAC(F)?$")>; + +//===----------------------------------------------------------------------===// +// System: Control Register Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "LCTL(G)?$")>; +def : InstRW<[FXU, LSU, LSU, LSU, LSU, Lat30, GroupAlone], + (instregex "STCT(L|G)$")>; +def : InstRW<[LSU], (instregex "E(P|S)A(I)?R$")>; +def : InstRW<[FXU, Lat30], (instregex "SSA(I)?R$")>; +def : InstRW<[FXU, Lat30], (instregex "ESEA$")>; + +//===----------------------------------------------------------------------===// +// System: Prefix-Register Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "SPX$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STPX$")>; + +//===----------------------------------------------------------------------===// +// System: Storage-Key and Real Memory Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "ISKE$")>; +def : InstRW<[FXU, Lat30], (instregex "IVSK$")>; +def : InstRW<[FXU, Lat30], (instregex "SSKE(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "RRB(E|M)$")>; +def : InstRW<[FXU, 
Lat30], (instregex "PFMF$")>; +def : InstRW<[FXU, Lat30], (instregex "TB$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "PGIN$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "PGOUT$")>; + +//===----------------------------------------------------------------------===// +// System: Dynamic-Address-Translation Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "IPTE(Opt)?(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "IDTE(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "CRDTE(Opt)?$")>; +def : InstRW<[FXU, Lat30], (instregex "PTLB$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "CSP(G)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LPTEA$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LRA(Y|G)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STRAG$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "LURA(G)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STUR(A|G)$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "TPROT$")>; + +//===----------------------------------------------------------------------===// +// System: Memory-move Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[LSU, Lat8, GroupAlone], (instregex "MVC(K|P|S)$")>; +def : InstRW<[LSU, Lat6, Lat30, GroupAlone], (instregex "MVCSK$")>; +def : InstRW<[LSU, Lat6, GroupAlone], (instregex "MVCDK$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "MVCOS$")>; +def : InstRW<[LSU, Lat30, GroupAlone], (instregex "MVPG$")>; + +//===----------------------------------------------------------------------===// +// System: Address-Space Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], (instregex "LASP$")>; +def : InstRW<[LSU, GroupAlone], (instregex "PALB$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "PC$")>; +def : InstRW<[FXU, Lat30], (instregex "PR$")>; +def : InstRW<[FXU, Lat30], (instregex "PT(I)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "RP$")>; +def : InstRW<[FXU, Lat30], (instregex "BS(G|A)$")>; +def : InstRW<[FXU, Lat20], (instregex "TAR$")>; + +//===----------------------------------------------------------------------===// +// System: Linkage-Stack Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30, EndGroup], (instregex "BAKR$")>; +def : InstRW<[FXU, Lat30], (instregex "EREG(G)?$")>; +def : InstRW<[FXU, Lat30], (instregex "(E|M)STA$")>; + +//===----------------------------------------------------------------------===// +// System: Time-Related Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "PTFF$")>; +def : InstRW<[FXU, LSU, Lat20], (instregex "SCK$")>; +def : InstRW<[FXU, Lat30], (instregex "SCKPF$")>; +def : InstRW<[FXU, LSU, Lat20], (instregex "SCKC$")>; +def : InstRW<[FXU, LSU, Lat20], (instregex "SPT$")>; +def : InstRW<[FXU, LSU, LSU, Lat9, GroupAlone], (instregex "STCK(F)?$")>; +def : InstRW<[LSU, LSU, LSU, LSU, FXU, FXU, Lat20, GroupAlone], + (instregex "STCKE$")>; +def : InstRW<[FXU, LSU, Lat9], (instregex "STCKC$")>; +def : InstRW<[FXU, LSU, Lat8], (instregex "STPT$")>; + +//===----------------------------------------------------------------------===// +// System: CPU-Related Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, LSU, Lat30], 
(instregex "STAP$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STIDP$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STSI$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STFL(E)?$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "ECAG$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "ECTG$")>; +def : InstRW<[FXU, Lat30], (instregex "PTF$")>; +def : InstRW<[FXU, Lat30], (instregex "PCKMO$")>; + +//===----------------------------------------------------------------------===// +// System: Miscellaneous Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "SVC$")>; +def : InstRW<[FXU, GroupAlone], (instregex "MC$")>; +def : InstRW<[FXU, Lat30], (instregex "DIAG$")>; +def : InstRW<[FXU], (instregex "TRAC(E|G)$")>; +def : InstRW<[FXU, Lat30], (instregex "TRAP(2|4)$")>; +def : InstRW<[FXU, Lat30], (instregex "SIGP$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "SIGA$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "SIE$")>; + +//===----------------------------------------------------------------------===// +// System: CPU-Measurement Facility Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU], (instregex "LPP$")>; +def : InstRW<[FXU, Lat30], (instregex "ECPGA$")>; +def : InstRW<[FXU, Lat30], (instregex "E(C|P)CTR$")>; +def : InstRW<[FXU, Lat30], (instregex "LCCTL$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "L(P|S)CTL$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "Q(S|CTR)I$")>; +def : InstRW<[FXU, Lat30], (instregex "S(C|P)CTR$")>; + +//===----------------------------------------------------------------------===// +// System: I/O Instructions +//===----------------------------------------------------------------------===// + +def : InstRW<[FXU, Lat30], (instregex "(C|H|R|X)SCH$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "(M|S|ST|T)SCH$")>; +def : InstRW<[FXU, Lat30], (instregex "RCHP$")>; +def : InstRW<[FXU, Lat30], (instregex "SCHM$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "STC(PS|RW)$")>; +def : InstRW<[FXU, LSU, Lat30], (instregex "TPI$")>; +def : InstRW<[FXU, Lat30], (instregex "SAL$")>; } diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp index 83882fc..13ceb37 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp @@ -14,9 +14,9 @@ //===----------------------------------------------------------------------===// #include "SystemZTargetMachine.h" +#include "llvm/CodeGen/LivePhysRegs.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/LivePhysRegs.h" #include "llvm/Target/TargetRegisterInfo.h" using namespace llvm; @@ -167,10 +167,10 @@ bool SystemZShortenInst::shortenFPConv(MachineInstr &MI, unsigned Opcode) { MI.RemoveOperand(0); MI.setDesc(TII->get(Opcode)); MachineInstrBuilder(*MI.getParent()->getParent(), &MI) - .addOperand(Dest) - .addOperand(Mode) - .addOperand(Src) - .addOperand(Suppress); + .add(Dest) + .add(Mode) + .add(Src) + .add(Suppress); return true; } return false; @@ -200,14 +200,26 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) { Changed |= shortenOn001AddCC(MI, SystemZ::ADBR); break; + case SystemZ::WFASB: + Changed |= shortenOn001AddCC(MI, SystemZ::AEBR); + break; + case SystemZ::WFDDB: Changed |= shortenOn001(MI, SystemZ::DDBR); break; + case SystemZ::WFDSB: + Changed 
|= shortenOn001(MI, SystemZ::DEBR); + break; + case SystemZ::WFIDB: Changed |= shortenFPConv(MI, SystemZ::FIDBRA); break; + case SystemZ::WFISB: + Changed |= shortenFPConv(MI, SystemZ::FIEBRA); + break; + case SystemZ::WLDEB: Changed |= shortenOn01(MI, SystemZ::LDEBR); break; @@ -220,30 +232,58 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) { Changed |= shortenOn001(MI, SystemZ::MDBR); break; + case SystemZ::WFMSB: + Changed |= shortenOn001(MI, SystemZ::MEEBR); + break; + case SystemZ::WFLCDB: Changed |= shortenOn01(MI, SystemZ::LCDFR); break; + case SystemZ::WFLCSB: + Changed |= shortenOn01(MI, SystemZ::LCDFR_32); + break; + case SystemZ::WFLNDB: Changed |= shortenOn01(MI, SystemZ::LNDFR); break; + case SystemZ::WFLNSB: + Changed |= shortenOn01(MI, SystemZ::LNDFR_32); + break; + case SystemZ::WFLPDB: Changed |= shortenOn01(MI, SystemZ::LPDFR); break; + case SystemZ::WFLPSB: + Changed |= shortenOn01(MI, SystemZ::LPDFR_32); + break; + case SystemZ::WFSQDB: Changed |= shortenOn01(MI, SystemZ::SQDBR); break; + case SystemZ::WFSQSB: + Changed |= shortenOn01(MI, SystemZ::SQEBR); + break; + case SystemZ::WFSDB: Changed |= shortenOn001AddCC(MI, SystemZ::SDBR); break; + case SystemZ::WFSSB: + Changed |= shortenOn001AddCC(MI, SystemZ::SEBR); + break; + case SystemZ::WFCDB: Changed |= shortenOn01(MI, SystemZ::CDBR); break; + case SystemZ::WFCSB: + Changed |= shortenOn01(MI, SystemZ::CEBR); + break; + case SystemZ::VL32: // For z13 we prefer LDE over LE to avoid partial register dependencies. Changed |= shortenOn0(MI, SystemZ::LDE32); diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp index ce07ea3..9cd09b0 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp @@ -37,12 +37,20 @@ SystemZSubtarget::SystemZSubtarget(const Triple &TT, const std::string &CPU, const TargetMachine &TM) : SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false), HasLoadStoreOnCond(false), HasHighWord(false), HasFPExtension(false), - HasPopulationCount(false), HasFastSerialization(false), - HasInterlockedAccess1(false), HasMiscellaneousExtensions(false), + HasPopulationCount(false), HasMessageSecurityAssist3(false), + HasMessageSecurityAssist4(false), HasResetReferenceBitsMultiple(false), + HasFastSerialization(false), HasInterlockedAccess1(false), + HasMiscellaneousExtensions(false), HasExecutionHint(false), HasLoadAndTrap(false), HasTransactionalExecution(false), HasProcessorAssist(false), + HasDFPZonedConversion(false), HasEnhancedDAT2(false), HasVector(false), HasLoadStoreOnCond2(false), - HasLoadAndZeroRightmostByte(false), + HasLoadAndZeroRightmostByte(false), HasMessageSecurityAssist5(false), + HasDFPPackedConversion(false), + HasMiscellaneousExtensions2(false), HasGuardedStorage(false), + HasMessageSecurityAssist7(false), HasMessageSecurityAssist8(false), + HasVectorEnhancements1(false), HasVectorPackedDecimal(false), + HasInsertReferenceBitsMultiple(false), TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this), TSInfo(), FrameLowering() {} diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h index cdb6132..4829f73 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h +++ b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h @@ -19,8 +19,8 @@ #include "SystemZInstrInfo.h" #include "SystemZRegisterInfo.h" #include "SystemZSelectionDAGInfo.h" -#include 
"llvm/IR/DataLayout.h" #include "llvm/ADT/Triple.h" +#include "llvm/IR/DataLayout.h" #include "llvm/Target/TargetSubtargetInfo.h" #include <string> @@ -39,6 +39,9 @@ protected: bool HasHighWord; bool HasFPExtension; bool HasPopulationCount; + bool HasMessageSecurityAssist3; + bool HasMessageSecurityAssist4; + bool HasResetReferenceBitsMultiple; bool HasFastSerialization; bool HasInterlockedAccess1; bool HasMiscellaneousExtensions; @@ -46,9 +49,20 @@ protected: bool HasLoadAndTrap; bool HasTransactionalExecution; bool HasProcessorAssist; + bool HasDFPZonedConversion; + bool HasEnhancedDAT2; bool HasVector; bool HasLoadStoreOnCond2; bool HasLoadAndZeroRightmostByte; + bool HasMessageSecurityAssist5; + bool HasDFPPackedConversion; + bool HasMiscellaneousExtensions2; + bool HasGuardedStorage; + bool HasMessageSecurityAssist7; + bool HasMessageSecurityAssist8; + bool HasVectorEnhancements1; + bool HasVectorPackedDecimal; + bool HasInsertReferenceBitsMultiple; private: Triple TargetTriple; @@ -104,6 +118,19 @@ public: // Return true if the target has the population-count facility. bool hasPopulationCount() const { return HasPopulationCount; } + // Return true if the target has the message-security-assist + // extension facility 3. + bool hasMessageSecurityAssist3() const { return HasMessageSecurityAssist3; } + + // Return true if the target has the message-security-assist + // extension facility 4. + bool hasMessageSecurityAssist4() const { return HasMessageSecurityAssist4; } + + // Return true if the target has the reset-reference-bits-multiple facility. + bool hasResetReferenceBitsMultiple() const { + return HasResetReferenceBitsMultiple; + } + // Return true if the target has the fast-serialization facility. bool hasFastSerialization() const { return HasFastSerialization; } @@ -127,14 +154,54 @@ public: // Return true if the target has the processor-assist facility. bool hasProcessorAssist() const { return HasProcessorAssist; } + // Return true if the target has the DFP zoned-conversion facility. + bool hasDFPZonedConversion() const { return HasDFPZonedConversion; } + + // Return true if the target has the enhanced-DAT facility 2. + bool hasEnhancedDAT2() const { return HasEnhancedDAT2; } + // Return true if the target has the load-and-zero-rightmost-byte facility. bool hasLoadAndZeroRightmostByte() const { return HasLoadAndZeroRightmostByte; } + // Return true if the target has the message-security-assist + // extension facility 5. + bool hasMessageSecurityAssist5() const { return HasMessageSecurityAssist5; } + + // Return true if the target has the DFP packed-conversion facility. + bool hasDFPPackedConversion() const { return HasDFPPackedConversion; } + // Return true if the target has the vector facility. bool hasVector() const { return HasVector; } + // Return true if the target has the miscellaneous-extensions facility 2. + bool hasMiscellaneousExtensions2() const { + return HasMiscellaneousExtensions2; + } + + // Return true if the target has the guarded-storage facility. + bool hasGuardedStorage() const { return HasGuardedStorage; } + + // Return true if the target has the message-security-assist + // extension facility 7. + bool hasMessageSecurityAssist7() const { return HasMessageSecurityAssist7; } + + // Return true if the target has the message-security-assist + // extension facility 8. + bool hasMessageSecurityAssist8() const { return HasMessageSecurityAssist8; } + + // Return true if the target has the vector-enhancements facility 1. 
+ bool hasVectorEnhancements1() const { return HasVectorEnhancements1; } + + // Return true if the target has the vector-packed-decimal facility. + bool hasVectorPackedDecimal() const { return HasVectorPackedDecimal; } + + // Return true if the target has the insert-reference-bits-multiple facility. + bool hasInsertReferenceBitsMultiple() const { + return HasInsertReferenceBitsMultiple; + } + // Return true if GV can be accessed using LARL for reloc model RM // and code model CM. bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const; diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTDC.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTDC.cpp index 96a9ef8..5dbd23d 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZTDC.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZTDC.cpp @@ -47,10 +47,10 @@ #include "SystemZ.h" #include "llvm/ADT/MapVector.h" #include "llvm/IR/Constants.h" -#include "llvm/IR/Instructions.h" +#include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" +#include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" -#include "llvm/IR/IRBuilder.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include <deque> diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp index 33fdb8f..025bf73 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp @@ -8,13 +8,24 @@ //===----------------------------------------------------------------------===// #include "SystemZTargetMachine.h" -#include "SystemZTargetTransformInfo.h" +#include "MCTargetDesc/SystemZMCTargetDesc.h" +#include "SystemZ.h" #include "SystemZMachineScheduler.h" +#include "SystemZTargetTransformInfo.h" +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" #include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/Support/CodeGen.h" #include "llvm/Support/TargetRegistry.h" +#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Transforms/Scalar.h" -#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include <string> using namespace llvm; @@ -48,7 +59,7 @@ static bool UsesVectorABI(StringRef CPU, StringRef FS) { static std::string computeDataLayout(const Triple &TT, StringRef CPU, StringRef FS) { bool VectorABI = UsesVectorABI(CPU, FS); - std::string Ret = ""; + std::string Ret; // Big endian. Ret += "E"; @@ -96,18 +107,19 @@ SystemZTargetMachine::SystemZTargetMachine(const Target &T, const Triple &TT, CodeGenOpt::Level OL) : LLVMTargetMachine(T, computeDataLayout(TT, CPU, FS), TT, CPU, FS, Options, getEffectiveRelocModel(RM), CM, OL), - TLOF(make_unique<TargetLoweringObjectFileELF>()), + TLOF(llvm::make_unique<TargetLoweringObjectFileELF>()), Subtarget(TT, CPU, FS, *this) { initAsmInfo(); } -SystemZTargetMachine::~SystemZTargetMachine() {} +SystemZTargetMachine::~SystemZTargetMachine() = default; namespace { + /// SystemZ Code Generator Pass Configuration Options. 
class SystemZPassConfig : public TargetPassConfig { public: - SystemZPassConfig(SystemZTargetMachine *TM, PassManagerBase &PM) + SystemZPassConfig(SystemZTargetMachine &TM, PassManagerBase &PM) : TargetPassConfig(TM, PM) {} SystemZTargetMachine &getSystemZTargetMachine() const { @@ -116,7 +128,8 @@ public: ScheduleDAGInstrs * createPostMachineScheduler(MachineSchedContext *C) const override { - return new ScheduleDAGMI(C, make_unique<SystemZPostRASchedStrategy>(C), + return new ScheduleDAGMI(C, + llvm::make_unique<SystemZPostRASchedStrategy>(C), /*RemoveKillFlags=*/true); } @@ -126,11 +139,14 @@ public: void addPreSched2() override; void addPreEmitPass() override; }; + } // end anonymous namespace void SystemZPassConfig::addIRPasses() { - if (getOptLevel() != CodeGenOpt::None) + if (getOptLevel() != CodeGenOpt::None) { addPass(createSystemZTDCPass()); + addPass(createLoopDataPrefetchPass()); + } TargetPassConfig::addIRPasses(); } @@ -157,7 +173,6 @@ void SystemZPassConfig::addPreSched2() { } void SystemZPassConfig::addPreEmitPass() { - // Do instruction shortening before compare elimination because some // vector instructions will be shortened into opcodes that compare // elimination recognizes. @@ -199,7 +214,7 @@ void SystemZPassConfig::addPreEmitPass() { } TargetPassConfig *SystemZTargetMachine::createPassConfig(PassManagerBase &PM) { - return new SystemZPassConfig(this, PM); + return new SystemZPassConfig(*this, PM); } TargetIRAnalysis SystemZTargetMachine::getTargetIRAnalysis() { diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h index 69cf9bc..a10ca64 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h +++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h @@ -1,4 +1,4 @@ -//==- SystemZTargetMachine.h - Define TargetMachine for SystemZ ---*- C++ -*-=// +//=- SystemZTargetMachine.h - Define TargetMachine for SystemZ ----*- C++ -*-=// // // The LLVM Compiler Infrastructure // @@ -16,15 +16,18 @@ #define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZTARGETMACHINE_H #include "SystemZSubtarget.h" +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/Support/CodeGen.h" #include "llvm/Target/TargetMachine.h" +#include <memory> namespace llvm { -class TargetFrameLowering; - class SystemZTargetMachine : public LLVMTargetMachine { std::unique_ptr<TargetLoweringObjectFile> TLOF; - SystemZSubtarget Subtarget; + SystemZSubtarget Subtarget; public: SystemZTargetMachine(const Target &T, const Triple &TT, StringRef CPU, @@ -34,20 +37,22 @@ public: ~SystemZTargetMachine() override; const SystemZSubtarget *getSubtargetImpl() const { return &Subtarget; } + const SystemZSubtarget *getSubtargetImpl(const Function &) const override { return &Subtarget; } + // Override LLVMTargetMachine TargetPassConfig *createPassConfig(PassManagerBase &PM) override; TargetIRAnalysis getTargetIRAnalysis() override; + TargetLoweringObjectFile *getObjFileLowering() const override { return TLOF.get(); } bool targetSchedulesPostRAScheduling() const override { return true; }; - }; } // end namespace llvm -#endif +#endif // LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZTARGETMACHINE_H diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp index b10c0e0..506dc74 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp 
@@ -238,7 +238,7 @@ SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) { return TTI::PSK_Software; } -void SystemZTTIImpl::getUnrollingPreferences(Loop *L, +void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP) { // Find out if L contains a call, what the machine instruction count // estimate is, and how many stores there are. @@ -259,11 +259,8 @@ void SystemZTTIImpl::getUnrollingPreferences(Loop *L, } } if (isa<StoreInst>(&I)) { - NumStores++; Type *MemAccessTy = I.getOperand(0)->getType(); - if((MemAccessTy->isIntegerTy() || MemAccessTy->isFloatingPointTy()) && - (getDataLayout().getTypeSizeInBits(MemAccessTy) == 128)) - NumStores++; // 128 bit fp/int stores get split. + NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, 0, 0); } } @@ -305,7 +302,7 @@ unsigned SystemZTTIImpl::getNumberOfRegisters(bool Vector) { return 0; } -unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) { +unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const { if (!Vector) return 64; if (ST->hasVector()) @@ -313,3 +310,581 @@ unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) { return 0; } +int SystemZTTIImpl::getArithmeticInstrCost( + unsigned Opcode, Type *Ty, + TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info, + TTI::OperandValueProperties Opd1PropInfo, + TTI::OperandValueProperties Opd2PropInfo, + ArrayRef<const Value *> Args) { + + // TODO: return a good value for BB-VECTORIZER that includes the + // immediate loads, which we do not want to count for the loop + // vectorizer, since they are hopefully hoisted out of the loop. This + // would require a new parameter 'InLoop', but not sure if constant + // args are common enough to motivate this. + + unsigned ScalarBits = Ty->getScalarSizeInBits(); + + // Div with a constant which is a power of 2 will be converted by + // DAGCombiner to use shifts. With vector shift-element instructions, a + // vector sdiv costs about as much as a scalar one. + const unsigned SDivCostEstimate = 4; + bool SDivPow2 = false; + bool UDivPow2 = false; + if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv) && + Args.size() == 2) { + const ConstantInt *CI = nullptr; + if (const Constant *C = dyn_cast<Constant>(Args[1])) { + if (C->getType()->isVectorTy()) + CI = dyn_cast_or_null<const ConstantInt>(C->getSplatValue()); + else + CI = dyn_cast<const ConstantInt>(C); + } + if (CI != nullptr && + (CI->getValue().isPowerOf2() || (-CI->getValue()).isPowerOf2())) { + if (Opcode == Instruction::SDiv) + SDivPow2 = true; + else + UDivPow2 = true; + } + } + + if (Ty->isVectorTy()) { + assert (ST->hasVector() && "getArithmeticInstrCost() called with vector type."); + unsigned VF = Ty->getVectorNumElements(); + unsigned NumVectors = getNumberOfParts(Ty); + + // These vector operations are custom handled, but are still supported + // with one instruction per vector, regardless of element size. + if (Opcode == Instruction::Shl || Opcode == Instruction::LShr || + Opcode == Instruction::AShr || UDivPow2) { + return NumVectors; + } + + if (SDivPow2) + return (NumVectors * SDivCostEstimate); + + // These FP operations are supported with a single vector instruction for + // double (base implementation assumes float generally costs 2). For + // FP128, the scalar cost is 1, and there is no overhead since the values + // are already in scalar registers. 
+ if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub || + Opcode == Instruction::FMul || Opcode == Instruction::FDiv) { + switch (ScalarBits) { + case 32: { + // The vector enhancements facility 1 provides v4f32 instructions. + if (ST->hasVectorEnhancements1()) + return NumVectors; + // Return the cost of multiple scalar invocation plus the cost of + // inserting and extracting the values. + unsigned ScalarCost = getArithmeticInstrCost(Opcode, Ty->getScalarType()); + unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args); + // FIXME: VF 2 for these FP operations are currently just as + // expensive as for VF 4. + if (VF == 2) + Cost *= 2; + return Cost; + } + case 64: + case 128: + return NumVectors; + default: + break; + } + } + + // There is no native support for FRem. + if (Opcode == Instruction::FRem) { + unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args); + // FIXME: VF 2 for float is currently just as expensive as for VF 4. + if (VF == 2 && ScalarBits == 32) + Cost *= 2; + return Cost; + } + } + else { // Scalar: + // These FP operations are supported with a dedicated instruction for + // float, double and fp128 (base implementation assumes float generally + // costs 2). + if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub || + Opcode == Instruction::FMul || Opcode == Instruction::FDiv) + return 1; + + // There is no native support for FRem. + if (Opcode == Instruction::FRem) + return LIBCALL_COST; + + if (Opcode == Instruction::LShr || Opcode == Instruction::AShr) + return (ScalarBits >= 32 ? 1 : 2 /*ext*/); + + // Or requires one instruction, although it has custom handling for i64. + if (Opcode == Instruction::Or) + return 1; + + if (Opcode == Instruction::Xor && ScalarBits == 1) + // 2 * ipm sequences ; xor ; shift ; compare + return 7; + + if (UDivPow2) + return 1; + if (SDivPow2) + return SDivCostEstimate; + + // An extra extension for narrow types is needed. + if ((Opcode == Instruction::SDiv || Opcode == Instruction::SRem)) + // sext of op(s) for narrow types + return (ScalarBits < 32 ? 4 : (ScalarBits == 32 ? 2 : 1)); + + if (Opcode == Instruction::UDiv || Opcode == Instruction::URem) + // Clearing of low 64 bit reg + sext of op(s) for narrow types + dl[g]r + return (ScalarBits < 32 ? 4 : 2); + } + + // Fallback to the default implementation. + return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info, + Opd1PropInfo, Opd2PropInfo, Args); +} + + +int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, + Type *SubTp) { + assert (Tp->isVectorTy()); + assert (ST->hasVector() && "getShuffleCost() called."); + unsigned NumVectors = getNumberOfParts(Tp); + + // TODO: Since fp32 is expanded, the shuffle cost should always be 0. + + // FP128 values are always in scalar registers, so there is no work + // involved with a shuffle, except for broadcast. In that case register + // moves are done with a single instruction per element. + if (Tp->getScalarType()->isFP128Ty()) + return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0); + + switch (Kind) { + case TargetTransformInfo::SK_ExtractSubvector: + // ExtractSubvector Index indicates start offset. + + // Extracting a subvector from first index is a noop. + return (Index == 0 ? 0 : NumVectors); + + case TargetTransformInfo::SK_Broadcast: + // Loop vectorizer calls here to figure out the extra cost of + // broadcasting a loaded value to all elements of a vector. 
Since vlrep + // loads and replicates with a single instruction, adjust the returned + // value. + return NumVectors - 1; + + default: + + // SystemZ supports single instruction permutation / replication. + return NumVectors; + } + + return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); +} + +// Return the log2 difference of the element sizes of the two vector types. +static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) { + unsigned Bits0 = Ty0->getScalarSizeInBits(); + unsigned Bits1 = Ty1->getScalarSizeInBits(); + + if (Bits1 > Bits0) + return (Log2_32(Bits1) - Log2_32(Bits0)); + + return (Log2_32(Bits0) - Log2_32(Bits1)); +} + +// Return the number of instructions needed to truncate SrcTy to DstTy. +unsigned SystemZTTIImpl:: +getVectorTruncCost(Type *SrcTy, Type *DstTy) { + assert (SrcTy->isVectorTy() && DstTy->isVectorTy()); + assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() && + "Packing must reduce size of vector type."); + assert (SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() && + "Packing should not change number of elements."); + + // TODO: Since fp32 is expanded, the extract cost should always be 0. + + unsigned NumParts = getNumberOfParts(SrcTy); + if (NumParts <= 2) + // Up to 2 vector registers can be truncated efficiently with pack or + // permute. The latter requires an immediate mask to be loaded, which + // typically gets hoisted out of a loop. TODO: return a good value for + // BB-VECTORIZER that includes the immediate loads, which we do not want + // to count for the loop vectorizer. + return 1; + + unsigned Cost = 0; + unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy); + unsigned VF = SrcTy->getVectorNumElements(); + for (unsigned P = 0; P < Log2Diff; ++P) { + if (NumParts > 1) + NumParts /= 2; + Cost += NumParts; + } + + // Currently, a general mix of permutes and pack instructions is output by + // isel, which follow the cost computation above except for this case which + // is one instruction less: + if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 && + DstTy->getScalarSizeInBits() == 8) + Cost--; + + return Cost; +} + +// Return the cost of converting a vector bitmask produced by a compare +// (SrcTy), to the type of the select or extend instruction (DstTy). +unsigned SystemZTTIImpl:: +getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) { + assert (SrcTy->isVectorTy() && DstTy->isVectorTy() && + "Should only be called with vector types."); + + unsigned PackCost = 0; + unsigned SrcScalarBits = SrcTy->getScalarSizeInBits(); + unsigned DstScalarBits = DstTy->getScalarSizeInBits(); + unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy); + if (SrcScalarBits > DstScalarBits) + // The bitmask will be truncated. + PackCost = getVectorTruncCost(SrcTy, DstTy); + else if (SrcScalarBits < DstScalarBits) { + unsigned DstNumParts = getNumberOfParts(DstTy); + // Each vector select needs its part of the bitmask unpacked. + PackCost = Log2Diff * DstNumParts; + // Extra cost for moving part of mask before unpacking. + PackCost += DstNumParts - 1; + } + + return PackCost; +} + +// Return the type of the compared operands. This is needed to compute the +// cost for a Select / ZExt or SExt instruction. 
+static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) { + Type *OpTy = nullptr; + if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0))) + OpTy = CI->getOperand(0)->getType(); + else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0))) + if (LogicI->getNumOperands() == 2) + if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0))) + if (isa<CmpInst>(LogicI->getOperand(1))) + OpTy = CI0->getOperand(0)->getType(); + + if (OpTy != nullptr) { + if (VF == 1) { + assert (!OpTy->isVectorTy() && "Expected scalar type"); + return OpTy; + } + // Return the potentially vectorized type based on 'I' and 'VF'. 'I' may + // be either scalar or already vectorized with a same or lesser VF. + Type *ElTy = OpTy->getScalarType(); + return VectorType::get(ElTy, VF); + } + + return nullptr; +} + +int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, + const Instruction *I) { + unsigned DstScalarBits = Dst->getScalarSizeInBits(); + unsigned SrcScalarBits = Src->getScalarSizeInBits(); + + if (Src->isVectorTy()) { + assert (ST->hasVector() && "getCastInstrCost() called with vector type."); + assert (Dst->isVectorTy()); + unsigned VF = Src->getVectorNumElements(); + unsigned NumDstVectors = getNumberOfParts(Dst); + unsigned NumSrcVectors = getNumberOfParts(Src); + + if (Opcode == Instruction::Trunc) { + if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits()) + return 0; // Check for NOOP conversions. + return getVectorTruncCost(Src, Dst); + } + + if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { + if (SrcScalarBits >= 8) { + // ZExt/SExt will be handled with one unpack per doubling of width. + unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst); + + // For types that spans multiple vector registers, some additional + // instructions are used to setup the unpacking. + unsigned NumSrcVectorOps = + (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors) + : (NumDstVectors / 2)); + + return (NumUnpacks * NumDstVectors) + NumSrcVectorOps; + } + else if (SrcScalarBits == 1) { + // This should be extension of a compare i1 result. + // If we know what the widths of the compared operands, get the + // cost of converting it to Dst. Otherwise assume same widths. + unsigned Cost = 0; + Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr); + if (CmpOpTy != nullptr) + Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst); + if (Opcode == Instruction::ZExt) + // One 'vn' per dst vector with an immediate mask. + Cost += NumDstVectors; + return Cost; + } + } + + if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP || + Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) { + // TODO: Fix base implementation which could simplify things a bit here + // (seems to miss on differentiating on scalar/vector types). + + // Only 64 bit vector conversions are natively supported. + if (SrcScalarBits == 64 && DstScalarBits == 64) + return NumDstVectors; + + // Return the cost of multiple scalar invocation plus the cost of + // inserting and extracting the values. Base implementation does not + // realize float->int gets scalarized. + unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(), + Src->getScalarType()); + unsigned TotCost = VF * ScalarCost; + bool NeedsInserts = true, NeedsExtracts = true; + // FP128 registers do not get inserted or extracted. 
+ if (DstScalarBits == 128 && + (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP)) + NeedsInserts = false; + if (SrcScalarBits == 128 && + (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI)) + NeedsExtracts = false; + + TotCost += getScalarizationOverhead(Dst, NeedsInserts, NeedsExtracts); + + // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4. + if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32) + TotCost *= 2; + + return TotCost; + } + + if (Opcode == Instruction::FPTrunc) { + if (SrcScalarBits == 128) // fp128 -> double/float + inserts of elements. + return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false); + else // double -> float + return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/); + } + + if (Opcode == Instruction::FPExt) { + if (SrcScalarBits == 32 && DstScalarBits == 64) { + // float -> double is very rare and currently unoptimized. Instead of + // using vldeb, which can do two at a time, all conversions are + // scalarized. + return VF * 2; + } + // -> fp128. VF * lxdb/lxeb + extraction of elements. + return VF + getScalarizationOverhead(Src, false, true); + } + } + else { // Scalar + assert (!Dst->isVectorTy()); + + if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP) + return (SrcScalarBits >= 32 ? 1 : 2 /*i8/i16 extend*/); + + if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) && + Src->isIntegerTy(1)) { + // This should be extension of a compare i1 result, which is done with + // ipm and a varying sequence of instructions. + unsigned Cost = 0; + if (Opcode == Instruction::SExt) + Cost = (DstScalarBits < 64 ? 3 : 4); + if (Opcode == Instruction::ZExt) + Cost = 3; + Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr); + if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy()) + // If operands of an fp-type was compared, this costs +1. + Cost++; + + return Cost; + } + } + + return BaseT::getCastInstrCost(Opcode, Dst, Src, I); +} + +int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, + const Instruction *I) { + if (ValTy->isVectorTy()) { + assert (ST->hasVector() && "getCmpSelInstrCost() called with vector type."); + unsigned VF = ValTy->getVectorNumElements(); + + // Called with a compare instruction. + if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) { + unsigned PredicateExtraCost = 0; + if (I != nullptr) { + // Some predicates cost one or two extra instructions. + switch (dyn_cast<CmpInst>(I)->getPredicate()) { + case CmpInst::Predicate::ICMP_NE: + case CmpInst::Predicate::ICMP_UGE: + case CmpInst::Predicate::ICMP_ULE: + case CmpInst::Predicate::ICMP_SGE: + case CmpInst::Predicate::ICMP_SLE: + PredicateExtraCost = 1; + break; + case CmpInst::Predicate::FCMP_ONE: + case CmpInst::Predicate::FCMP_ORD: + case CmpInst::Predicate::FCMP_UEQ: + case CmpInst::Predicate::FCMP_UNO: + PredicateExtraCost = 2; + break; + default: + break; + } + } + + // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of + // floats. FIXME: <2 x float> generates same code as <4 x float>. + unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1); + unsigned NumVecs_cmp = getNumberOfParts(ValTy); + + unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost)); + return Cost; + } + else { // Called with a select instruction. + assert (Opcode == Instruction::Select); + + // We can figure out the extra cost of packing / unpacking if the + // instruction was passed and the compare instruction is found. 
+ unsigned PackCost = 0; + Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr); + if (CmpOpTy != nullptr) + PackCost = + getVectorBitmaskConversionCost(CmpOpTy, ValTy); + + return getNumberOfParts(ValTy) /*vsel*/ + PackCost; + } + } + else { // Scalar + switch (Opcode) { + case Instruction::ICmp: { + unsigned Cost = 1; + if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16) + Cost += 2; // extend both operands + return Cost; + } + case Instruction::Select: + if (ValTy->isFloatingPointTy()) + return 4; // No load on condition for FP, so this costs a conditional jump. + return 1; // Load On Condition. + } + } + + return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr); +} + +int SystemZTTIImpl:: +getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { + // vlvgp will insert two grs into a vector register, so only count half the + // number of instructions. + if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64)) + return ((Index % 2 == 0) ? 1 : 0); + + if (Opcode == Instruction::ExtractElement) { + int Cost = ((Val->getScalarSizeInBits() == 1) ? 2 /*+test-under-mask*/ : 1); + + // Give a slight penalty for moving out of vector pipeline to FXU unit. + if (Index == 0 && Val->isIntOrIntVectorTy()) + Cost += 1; + + return Cost; + } + + return BaseT::getVectorInstrCost(Opcode, Val, Index); +} + +int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, + unsigned Alignment, unsigned AddressSpace, + const Instruction *I) { + assert(!Src->isVoidTy() && "Invalid type"); + + if (!Src->isVectorTy() && Opcode == Instruction::Load && + I != nullptr && I->hasOneUse()) { + const Instruction *UserI = cast<Instruction>(*I->user_begin()); + unsigned Bits = Src->getScalarSizeInBits(); + bool FoldsLoad = false; + switch (UserI->getOpcode()) { + case Instruction::ICmp: + case Instruction::Add: + case Instruction::Sub: + case Instruction::Mul: + case Instruction::SDiv: + case Instruction::UDiv: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: + // This also makes sense for float operations, but disabled for now due + // to regressions. + // case Instruction::FCmp: + // case Instruction::FAdd: + // case Instruction::FSub: + // case Instruction::FMul: + // case Instruction::FDiv: + FoldsLoad = (Bits == 32 || Bits == 64); + break; + } + + if (FoldsLoad) { + assert (UserI->getNumOperands() == 2 && + "Expected to only handle binops."); + + // UserI can't fold two loads, so in that case return 0 cost only + // half of the time. + for (unsigned i = 0; i < 2; ++i) { + if (UserI->getOperand(i) == I) + continue; + if (LoadInst *LI = dyn_cast<LoadInst>(UserI->getOperand(i))) { + if (LI->hasOneUse()) + return i == 0; + } + } + + return 0; + } + } + + unsigned NumOps = getNumberOfParts(Src); + + if (Src->getScalarSizeInBits() == 128) + // 128 bit scalars are held in a pair of two 64 bit registers. + NumOps *= 2; + + return NumOps; +} + +int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, + unsigned Factor, + ArrayRef<unsigned> Indices, + unsigned Alignment, + unsigned AddressSpace) { + assert(isa<VectorType>(VecTy) && + "Expect a vector type for interleaved memory op"); + + unsigned WideBits = (VecTy->isPtrOrPtrVectorTy() ? + (64U * VecTy->getVectorNumElements()) : VecTy->getPrimitiveSizeInBits()); + assert (WideBits > 0 && "Could not compute size of vector"); + int NumWideParts = + ((WideBits % 128U) ? 
((WideBits / 128U) + 1) : (WideBits / 128U));
+
+  // How many source vectors are handled to produce a vectorized operand?
+  int NumElsPerVector = (VecTy->getVectorNumElements() / NumWideParts);
+  int NumSrcParts =
+    ((NumWideParts > NumElsPerVector) ? NumElsPerVector : NumWideParts);
+
+  // A Load group may have gaps.
+  unsigned NumOperands =
+    ((Opcode == Instruction::Load) ? Indices.size() : Factor);
+
+  // Each needed permute takes two vectors as input.
+  if (NumSrcParts > 1)
+    NumSrcParts--;
+  int NumPermutes = NumSrcParts * NumOperands;
+
+  // Cost of load/store operations and the permutations needed.
+  return NumWideParts + NumPermutes;
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
index f7d2d82..a0c6fa9 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
@@ -27,6 +27,8 @@ class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> {
   const SystemZSubtarget *getST() const { return ST; }
   const SystemZTargetLowering *getTLI() const { return TLI; }
 
+  unsigned const LIBCALL_COST = 30;
+
 public:
   explicit SystemZTTIImpl(const SystemZTargetMachine *TM, const Function &F)
       : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
@@ -43,7 +45,8 @@ public:
 
   TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
 
-  void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP);
+  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
+                               TTI::UnrollingPreferences &UP);
 
   /// @}
 
@@ -51,8 +54,39 @@ public:
   /// @{
 
   unsigned getNumberOfRegisters(bool Vector);
-  unsigned getRegisterBitWidth(bool Vector);
-
+  unsigned getRegisterBitWidth(bool Vector) const;
+
+  unsigned getCacheLineSize() { return 256; }
+  unsigned getPrefetchDistance() { return 2000; }
+  unsigned getMinPrefetchStride() { return 2048; }
+
+  bool prefersVectorizedAddressing() { return false; }
+  bool supportsEfficientVectorElementLoadStore() { return true; }
+  bool enableInterleavedAccessVectorization() { return true; }
+
+  int getArithmeticInstrCost(
+      unsigned Opcode, Type *Ty,
+      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
+      ArrayRef<const Value *> Args = ArrayRef<const Value *>());
+  int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
+  unsigned getVectorTruncCost(Type *SrcTy, Type *DstTy);
+  unsigned getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy);
+  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
+                       const Instruction *I = nullptr);
+  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
+                         const Instruction *I = nullptr);
+  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
+  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                      unsigned AddressSpace, const Instruction *I = nullptr);
+
+  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+                                 unsigned Factor,
+                                 ArrayRef<unsigned> Indices,
+                                 unsigned Alignment,
+                                 unsigned AddressSpace);
   /// @}
 };
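
As a sanity check on the interleaved-memory cost formula added above, the following standalone C++ sketch re-derives the same arithmetic outside of LLVM. The helper name interleavedCostSketch, its flattened parameter list, and the <16 x i32>, Factor = 4 example are hypothetical illustrations only; the hook in the patch takes the IR vector type and the Indices list instead.

#include <cstdio>

// Re-derivation (under the assumptions above) of the arithmetic in
// SystemZTTIImpl::getInterleavedMemoryOpCost(): ElemBits * NumElems stands in
// for the vector's primitive size in bits, IsLoad for
// (Opcode == Instruction::Load) and NumIndices for Indices.size().
static int interleavedCostSketch(unsigned ElemBits, unsigned NumElems,
                                 unsigned Factor, bool IsLoad,
                                 unsigned NumIndices) {
  unsigned WideBits = ElemBits * NumElems;
  // Number of 128-bit vector registers the whole interleaved group occupies.
  int NumWideParts = (WideBits % 128U) ? (WideBits / 128U + 1)
                                       : (WideBits / 128U);
  int NumElsPerVector = NumElems / NumWideParts;
  int NumSrcParts = (NumWideParts > NumElsPerVector) ? NumElsPerVector
                                                     : NumWideParts;
  // A load group may have gaps, so only the indices actually used pay for
  // permutes; a store group always materializes all Factor members.
  unsigned NumOperands = IsLoad ? NumIndices : Factor;
  if (NumSrcParts > 1)
    --NumSrcParts;                      // each permute consumes two inputs
  int NumPermutes = NumSrcParts * NumOperands;
  return NumWideParts + NumPermutes;    // wide loads/stores plus permutes
}

int main() {
  // Example: interleaved load of <16 x i32>, Factor = 4, all members used:
  // 4 wide loads + (3 permutes per extracted operand * 4 operands) = 16.
  std::printf("%d\n", interleavedCostSketch(32, 16, 4, /*IsLoad=*/true, 4));
  return 0;
}

In this model Factor itself only matters for stores, mirroring the "A Load group may have gaps" comment in the diff: a load group is charged per used index, while a store group pays for every member.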