Diffstat (limited to 'lib/Target/X86')
26 files changed, 4263 insertions, 580 deletions
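Before the per-file changes, a minimal sketch of how a client could drive the new C decoder directly (hypothetical client code, not part of this commit: the Buffer struct, bufferReader callback, and example bytes are illustrative; decodeInstruction, MODE_32BIT, and struct InternalInstruction come from X86DisassemblerDecoder.h below):

  #include <stdint.h>
  #include <stdio.h>
  #include "X86DisassemblerDecoder.h"

  struct Buffer {
    const uint8_t *bytes;   /* the instruction stream */
    uint64_t size;
  };

  /* A byteReader_t callback: fetch the byte at 'address' from the buffer. */
  static int bufferReader(void *arg, uint8_t *byte, uint64_t address) {
    struct Buffer *buf = (struct Buffer *)arg;
    if (address >= buf->size)
      return -1;            /* a reader failure aborts the decode */
    *byte = buf->bytes[address];
    return 0;
  }

  int main(void) {
    const uint8_t code[] = { 0x55, 0x89, 0xe5 }; /* pushl %ebp; movl %esp, %ebp */
    struct Buffer buf = { code, sizeof(code) };
    struct InternalInstruction insn;

    /* decodeInstruction returns 0 on success; a NULL logger disables logging. */
    if (!decodeInstruction(&insn, bufferReader, &buf, NULL, NULL, 0, MODE_32BIT))
      printf("decoded %u byte(s)\n", (unsigned)insn.length);
    return 0;
  }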
diff --git a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp index 8ec5b62..c74b97a 100644 --- a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp +++ b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp @@ -45,12 +45,14 @@ void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op) { } /// print_pcrel_imm - This is used to print an immediate value that ends up -/// being encoded as a pc-relative value. These print slightly differently, for -/// example, a $ is not emitted. +/// being encoded as a pc-relative value (e.g. for jumps and calls). These +/// print slightly differently than normal immediates. For example, a $ is not +/// emitted. void X86ATTInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isImm()) - O << Op.getImm(); + // Print this as a signed 32-bit value. + O << (int)Op.getImm(); else { assert(Op.isExpr() && "unknown pcrel immediate operand"); Op.getExpr()->print(O, &MAI); diff --git a/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp b/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp index 38c0c28..1015b69 100644 --- a/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp +++ b/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp @@ -355,10 +355,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand. lower_lea64_32mem(&OutMI, 1); break; - case X86::MOV16r0: - OutMI.setOpcode(X86::MOV32r0); - lower_subreg32(&OutMI, 0); - break; case X86::MOVZX16rr8: OutMI.setOpcode(X86::MOVZX32rr8); lower_subreg32(&OutMI, 0); diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt index 3ad65fb..4186fec 100644 --- a/lib/Target/X86/CMakeLists.txt +++ b/lib/Target/X86/CMakeLists.txt @@ -3,6 +3,7 @@ set(LLVM_TARGET_DEFINITIONS X86.td) tablegen(X86GenRegisterInfo.h.inc -gen-register-desc-header) tablegen(X86GenRegisterNames.inc -gen-register-enums) tablegen(X86GenRegisterInfo.inc -gen-register-desc) +tablegen(X86GenDisassemblerTables.inc -gen-disassembler) tablegen(X86GenInstrNames.inc -gen-instr-enums) tablegen(X86GenInstrInfo.inc -gen-instr-desc) tablegen(X86GenAsmWriter.inc -gen-asm-writer) diff --git a/lib/Target/X86/Disassembler/CMakeLists.txt b/lib/Target/X86/Disassembler/CMakeLists.txt index b329e89..2a83a9c 100644 --- a/lib/Target/X86/Disassembler/CMakeLists.txt +++ b/lib/Target/X86/Disassembler/CMakeLists.txt @@ -2,5 +2,6 @@ include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/ add_llvm_library(LLVMX86Disassembler X86Disassembler.cpp + X86DisassemblerDecoder.c ) add_dependencies(LLVMX86Disassembler X86CodeGenTable_gen) diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp index 2ebbc9b..a316860 100644 --- a/lib/Target/X86/Disassembler/X86Disassembler.cpp +++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp @@ -6,18 +6,465 @@ // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// +// +// This file is part of the X86 Disassembler. +// It contains code to translate the data produced by the decoder into +// MCInsts. +// Documentation for the disassembler can be found in X86Disassembler.h. 
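+//
+// A rough usage sketch of the C++ side (hypothetical client code, not from
+// this patch; it assumes llvm::nulls() as a discard stream and constructs
+// X86_32Disassembler from X86Disassembler.h directly):
+//
+//   X86Disassembler::X86_32Disassembler dis;
+//   MCInst inst;
+//   uint64_t size;
+//   if (dis.getInstruction(inst, size, region, address, nulls()))
+//     ... use inst; 'size' holds the decoded instruction's length ...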
+// +//===----------------------------------------------------------------------===// +#include "X86Disassembler.h" +#include "X86DisassemblerDecoder.h" + +#include "llvm/MC/MCDisassembler.h" #include "llvm/MC/MCDisassembler.h" +#include "llvm/MC/MCInst.h" #include "llvm/Target/TargetRegistry.h" -#include "X86.h" +#include "llvm/Support/MemoryObject.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +#include "X86GenRegisterNames.inc" + using namespace llvm; +using namespace llvm::X86Disassembler; + +namespace llvm { + +// Fill-ins to make the compiler happy. These constants are never actually +// assigned; they are just filler to make an automatically-generated switch +// statement work. +namespace X86 { + enum { + BX_SI = 500, + BX_DI = 501, + BP_SI = 502, + BP_DI = 503, + sib = 504, + sib64 = 505 + }; +} + +extern Target TheX86_32Target, TheX86_64Target; + +} + +static void translateInstruction(MCInst &target, + InternalInstruction &source); + +X86GenericDisassembler::X86GenericDisassembler(DisassemblerMode mode) : + MCDisassembler(), + fMode(mode) { +} + +X86GenericDisassembler::~X86GenericDisassembler() { +} + +/// regionReader - a callback function that wraps the readByte method from +/// MemoryObject. +/// +/// @param arg - The generic callback parameter. In this case, this should +/// be a pointer to a MemoryObject. +/// @param byte - A pointer to the byte to be read. +/// @param address - The address to be read. +static int regionReader(void* arg, uint8_t* byte, uint64_t address) { + MemoryObject* region = static_cast<MemoryObject*>(arg); + return region->readByte(address, byte); +} + +/// logger - a callback function that wraps the operator<< method from +/// raw_ostream. +/// +/// @param arg - The generic callback parameter. This should be a pointe +/// to a raw_ostream. +/// @param log - A string to be logged. logger() adds a newline. +static void logger(void* arg, const char* log) { + if (!arg) + return; + + raw_ostream &vStream = *(static_cast<raw_ostream*>(arg)); + vStream << log << "\n"; +} + +// +// Public interface for the disassembler +// + +bool X86GenericDisassembler::getInstruction(MCInst &instr, + uint64_t &size, + const MemoryObject ®ion, + uint64_t address, + raw_ostream &vStream) const { + InternalInstruction internalInstr; + + int ret = decodeInstruction(&internalInstr, + regionReader, + (void*)®ion, + logger, + (void*)&vStream, + address, + fMode); + + if(ret) { + size = internalInstr.readerCursor - address; + return false; + } + else { + size = internalInstr.length; + translateInstruction(instr, internalInstr); + return true; + } +} + +// +// Private code that translates from struct InternalInstructions to MCInsts. +// + +/// translateRegister - Translates an internal register to the appropriate LLVM +/// register, and appends it as an operand to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param reg - The Reg to append. +static void translateRegister(MCInst &mcInst, Reg reg) { +#define ENTRY(x) X86::x, + uint8_t llvmRegnums[] = { + ALL_REGS + 0 + }; +#undef ENTRY + + uint8_t llvmRegnum = llvmRegnums[reg]; + mcInst.addOperand(MCOperand::CreateReg(llvmRegnum)); +} + +/// translateImmediate - Appends an immediate operand to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param immediate - The immediate value to append. 
+static void translateImmediate(MCInst &mcInst, uint64_t immediate) { + mcInst.addOperand(MCOperand::CreateImm(immediate)); +} + +/// translateRMRegister - Translates a register stored in the R/M field of the +/// ModR/M byte to its LLVM equivalent and appends it to an MCInst. +/// @param mcInst - The MCInst to append to. +/// @param insn - The internal instruction to extract the R/M field +/// from. +static void translateRMRegister(MCInst &mcInst, + InternalInstruction &insn) { + assert(insn.eaBase != EA_BASE_sib && insn.eaBase != EA_BASE_sib64 && + "A R/M register operand may not have a SIB byte"); + + switch (insn.eaBase) { + case EA_BASE_NONE: + llvm_unreachable("EA_BASE_NONE for ModR/M base"); + break; +#define ENTRY(x) case EA_BASE_##x: + ALL_EA_BASES +#undef ENTRY + llvm_unreachable("A R/M register operand may not have a base; " + "the operand must be a register."); + break; +#define ENTRY(x) \ + case EA_REG_##x: \ + mcInst.addOperand(MCOperand::CreateReg(X86::x)); break; + ALL_REGS +#undef ENTRY + default: + llvm_unreachable("Unexpected EA base register"); + } +} + +/// translateRMMemory - Translates a memory operand stored in the Mod and R/M +/// fields of an internal instruction (and possibly its SIB byte) to a memory +/// operand in LLVM's format, and appends it to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param insn - The instruction to extract Mod, R/M, and SIB fields +/// from. +/// @param sr - Whether or not to emit the segment register. The +/// LEA instruction does not expect a segment-register +/// operand. +static void translateRMMemory(MCInst &mcInst, + InternalInstruction &insn, + bool sr) { + // Addresses in an MCInst are represented as five operands: + // 1. basereg (register) The R/M base, or (if there is a SIB) the + // SIB base + // 2. scaleamount (immediate) 1, or (if there is a SIB) the specified + // scale amount + // 3. indexreg (register) x86_registerNONE, or (if there is a SIB) + // the index (which is multiplied by the + // scale amount) + // 4. displacement (immediate) 0, or the displacement if there is one + // 5. 
segmentreg (register) x86_registerNONE for now, but could be set + // if we have segment overrides + + MCOperand baseReg; + MCOperand scaleAmount; + MCOperand indexReg; + MCOperand displacement; + MCOperand segmentReg; + + if (insn.eaBase == EA_BASE_sib || insn.eaBase == EA_BASE_sib64) { + if (insn.sibBase != SIB_BASE_NONE) { + switch (insn.sibBase) { + default: + llvm_unreachable("Unexpected sibBase"); +#define ENTRY(x) \ + case SIB_BASE_##x: \ + baseReg = MCOperand::CreateReg(X86::x); break; + ALL_SIB_BASES +#undef ENTRY + } + } else { + baseReg = MCOperand::CreateReg(0); + } + + if (insn.sibIndex != SIB_INDEX_NONE) { + switch (insn.sibIndex) { + default: + llvm_unreachable("Unexpected sibIndex"); +#define ENTRY(x) \ + case SIB_INDEX_##x: \ + indexReg = MCOperand::CreateReg(X86::x); break; + EA_BASES_32BIT + EA_BASES_64BIT +#undef ENTRY + } + } else { + indexReg = MCOperand::CreateReg(0); + } + + scaleAmount = MCOperand::CreateImm(insn.sibScale); + } else { + switch (insn.eaBase) { + case EA_BASE_NONE: + assert(insn.eaDisplacement != EA_DISP_NONE && + "EA_BASE_NONE and EA_DISP_NONE for ModR/M base"); + + if (insn.mode == MODE_64BIT) + baseReg = MCOperand::CreateReg(X86::RIP); // Section 2.2.1.6 + else + baseReg = MCOperand::CreateReg(0); + + indexReg = MCOperand::CreateReg(0); + break; + case EA_BASE_BX_SI: + baseReg = MCOperand::CreateReg(X86::BX); + indexReg = MCOperand::CreateReg(X86::SI); + break; + case EA_BASE_BX_DI: + baseReg = MCOperand::CreateReg(X86::BX); + indexReg = MCOperand::CreateReg(X86::DI); + break; + case EA_BASE_BP_SI: + baseReg = MCOperand::CreateReg(X86::BP); + indexReg = MCOperand::CreateReg(X86::SI); + break; + case EA_BASE_BP_DI: + baseReg = MCOperand::CreateReg(X86::BP); + indexReg = MCOperand::CreateReg(X86::DI); + break; + default: + indexReg = MCOperand::CreateReg(0); + switch (insn.eaBase) { + default: + llvm_unreachable("Unexpected eaBase"); + break; + // Here, we will use the fill-ins defined above. However, + // BX_SI, BX_DI, BP_SI, and BP_DI are all handled above and + // sib and sib64 were handled in the top-level if, so they're only + // placeholders to keep the compiler happy. +#define ENTRY(x) \ + case EA_BASE_##x: \ + baseReg = MCOperand::CreateReg(X86::x); break; + ALL_EA_BASES +#undef ENTRY +#define ENTRY(x) case EA_REG_##x: + ALL_REGS +#undef ENTRY + llvm_unreachable("A R/M memory operand may not be a register; " + "the base field must be a base."); + break; + } + } + + scaleAmount = MCOperand::CreateImm(1); + } + + displacement = MCOperand::CreateImm(insn.displacement); + + static const uint8_t segmentRegnums[SEG_OVERRIDE_max] = { + 0, // SEG_OVERRIDE_NONE + X86::CS, + X86::SS, + X86::DS, + X86::ES, + X86::FS, + X86::GS + }; + + segmentReg = MCOperand::CreateReg(segmentRegnums[insn.segmentOverride]); + + mcInst.addOperand(baseReg); + mcInst.addOperand(scaleAmount); + mcInst.addOperand(indexReg); + mcInst.addOperand(displacement); + + if (sr) + mcInst.addOperand(segmentReg); +} + +/// translateRM - Translates an operand stored in the R/M (and possibly SIB) +/// byte of an instruction to LLVM form, and appends it to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param operand - The operand, as stored in the descriptor table. +/// @param insn - The instruction to extract Mod, R/M, and SIB fields +/// from. 
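+///
+/// For example (illustrative operand): the AT&T-syntax memory operand
+/// -4(%ebp,%ecx,2) is appended by translateRMMemory() as the five-operand
+/// sequence baseReg=EBP, scaleAmount=2, indexReg=ECX, displacement=-4,
+/// segmentReg=0 (no override), following the layout documented above.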
+static void translateRM(MCInst &mcInst, + OperandSpecifier &operand, + InternalInstruction &insn) { + switch (operand.type) { + default: + llvm_unreachable("Unexpected type for a R/M operand"); + case TYPE_R8: + case TYPE_R16: + case TYPE_R32: + case TYPE_R64: + case TYPE_Rv: + case TYPE_MM: + case TYPE_MM32: + case TYPE_MM64: + case TYPE_XMM: + case TYPE_XMM32: + case TYPE_XMM64: + case TYPE_XMM128: + case TYPE_DEBUGREG: + case TYPE_CR32: + case TYPE_CR64: + translateRMRegister(mcInst, insn); + break; + case TYPE_M: + case TYPE_M8: + case TYPE_M16: + case TYPE_M32: + case TYPE_M64: + case TYPE_M128: + case TYPE_M512: + case TYPE_Mv: + case TYPE_M32FP: + case TYPE_M64FP: + case TYPE_M80FP: + case TYPE_M16INT: + case TYPE_M32INT: + case TYPE_M64INT: + case TYPE_M1616: + case TYPE_M1632: + case TYPE_M1664: + translateRMMemory(mcInst, insn, true); + break; + case TYPE_LEA: + translateRMMemory(mcInst, insn, false); + break; + } +} + +/// translateFPRegister - Translates a stack position on the FPU stack to its +/// LLVM form, and appends it to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param stackPos - The stack position to translate. +static void translateFPRegister(MCInst &mcInst, + uint8_t stackPos) { + assert(stackPos < 8 && "Invalid FP stack position"); + + mcInst.addOperand(MCOperand::CreateReg(X86::ST0 + stackPos)); +} + +/// translateOperand - Translates an operand stored in an internal instruction +/// to LLVM's format and appends it to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param operand - The operand, as stored in the descriptor table. +/// @param insn - The internal instruction. +static void translateOperand(MCInst &mcInst, + OperandSpecifier &operand, + InternalInstruction &insn) { + switch (operand.encoding) { + default: + llvm_unreachable("Unhandled operand encoding during translation"); + case ENCODING_REG: + translateRegister(mcInst, insn.reg); + break; + case ENCODING_RM: + translateRM(mcInst, operand, insn); + break; + case ENCODING_CB: + case ENCODING_CW: + case ENCODING_CD: + case ENCODING_CP: + case ENCODING_CO: + case ENCODING_CT: + llvm_unreachable("Translation of code offsets isn't supported."); + case ENCODING_IB: + case ENCODING_IW: + case ENCODING_ID: + case ENCODING_IO: + case ENCODING_Iv: + case ENCODING_Ia: + translateImmediate(mcInst, + insn.immediates[insn.numImmediatesTranslated++]); + break; + case ENCODING_RB: + case ENCODING_RW: + case ENCODING_RD: + case ENCODING_RO: + translateRegister(mcInst, insn.opcodeRegister); + break; + case ENCODING_I: + translateFPRegister(mcInst, insn.opcodeModifier); + break; + case ENCODING_Rv: + translateRegister(mcInst, insn.opcodeRegister); + break; + case ENCODING_DUP: + translateOperand(mcInst, + insn.spec->operands[operand.type - TYPE_DUP0], + insn); + break; + } +} + +/// translateInstruction - Translates an internal instruction and all its +/// operands to an MCInst. +/// +/// @param mcInst - The MCInst to populate with the instruction's data. +/// @param insn - The internal instruction. 
+static void translateInstruction(MCInst &mcInst, + InternalInstruction &insn) { + assert(insn.spec); + + mcInst.setOpcode(insn.instructionID); + + int index; + + insn.numImmediatesTranslated = 0; + + for (index = 0; index < X86_MAX_OPERANDS; ++index) { + if (insn.spec->operands[index].encoding != ENCODING_NONE) + translateOperand(mcInst, insn.spec->operands[index], insn); + } +} static const MCDisassembler *createX86_32Disassembler(const Target &T) { - return 0; + return new X86Disassembler::X86_32Disassembler; } static const MCDisassembler *createX86_64Disassembler(const Target &T) { - return 0; + return new X86Disassembler::X86_64Disassembler; } extern "C" void LLVMInitializeX86Disassembler() { diff --git a/lib/Target/X86/Disassembler/X86Disassembler.h b/lib/Target/X86/Disassembler/X86Disassembler.h new file mode 100644 index 0000000..0e6e0b0 --- /dev/null +++ b/lib/Target/X86/Disassembler/X86Disassembler.h @@ -0,0 +1,150 @@ +//===- X86Disassembler.h - Disassembler for x86 and x86_64 ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The X86 disassembler is a table-driven disassembler for the 16-, 32-, and +// 64-bit X86 instruction sets. The main decode sequence for an assembly +// instruction in this disassembler is: +// +// 1. Read the prefix bytes and determine the attributes of the instruction. +// These attributes, recorded in enum attributeBits +// (X86DisassemblerDecoderCommon.h), form a bitmask. The table CONTEXTS_SYM +// provides a mapping from bitmasks to contexts, which are represented by +// enum InstructionContext (ibid.). +// +// 2. Read the opcode, and determine what kind of opcode it is. The +// disassembler distinguishes four kinds of opcodes, which are enumerated in +// OpcodeType (X86DisassemblerDecoderCommon.h): one-byte (0xnn), two-byte +// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a +// (0x0f 0x3a 0xnn). Mandatory prefixes are treated as part of the context. +// +// 3. Depending on the opcode type, look in one of four ClassDecision structures +// (X86DisassemblerDecoderCommon.h). Use the opcode class to determine which +// OpcodeDecision (ibid.) to look the opcode in. Look up the opcode, to get +// a ModRMDecision (ibid.). +// +// 4. Some instructions, such as escape opcodes or extended opcodes, or even +// instructions that have ModRM*Reg / ModRM*Mem forms in LLVM, need the +// ModR/M byte to complete decode. The ModRMDecision's type is an entry from +// ModRMDecisionType (X86DisassemblerDecoderCommon.h) that indicates if the +// ModR/M byte is required and how to interpret it. +// +// 5. After resolving the ModRMDecision, the disassembler has a unique ID +// of type InstrUID (X86DisassemblerDecoderCommon.h). Looking this ID up in +// INSTRUCTIONS_SYM yields the name of the instruction and the encodings and +// meanings of its operands. +// +// 6. For each operand, its encoding is an entry from OperandEncoding +// (X86DisassemblerDecoderCommon.h) and its type is an entry from +// OperandType (ibid.). The encoding indicates how to read it from the +// instruction; the type indicates how to interpret the value once it has +// been read. For example, a register operand could be stored in the R/M +// field of the ModR/M byte, the REG field of the ModR/M byte, or added to +// the main opcode. 
This is orthogonal to its meaning (a GPR or an XMM +// register, for instance). Given this information, the operands can be +// extracted and interpreted. +// +// 7. As the last step, the disassembler translates the instruction information +// and operands into a format understandable by the client - in this case, an +// MCInst for use by the MC infrastructure. +// +// The disassembler is broken broadly into two parts: the table emitter that +// emits the instruction decode tables discussed above during compilation, and +// the disassembler itself. The table emitter is documented in more detail in +// utils/TableGen/X86DisassemblerEmitter.h. +// +// X86Disassembler.h contains the public interface for the disassembler, +// adhering to the MCDisassembler interface. +// X86Disassembler.cpp contains the code responsible for step 7, and for +// invoking the decoder to execute steps 1-6. +// X86DisassemblerDecoderCommon.h contains the definitions needed by both the +// table emitter and the disassembler. +// X86DisassemblerDecoder.h contains the public interface of the decoder, +// factored out into C for possible use by other projects. +// X86DisassemblerDecoder.c contains the source code of the decoder, which is +// responsible for steps 1-6. +// +//===----------------------------------------------------------------------===// + +#ifndef X86DISASSEMBLER_H +#define X86DISASSEMBLER_H + +#define INSTRUCTION_SPECIFIER_FIELDS \ + const char* name; + +#define INSTRUCTION_IDS \ + InstrUID* instructionIDs; + +#include "X86DisassemblerDecoderCommon.h" + +#undef INSTRUCTION_SPECIFIER_FIELDS +#undef INSTRUCTION_IDS + +#include "llvm/MC/MCDisassembler.h" + +struct InternalInstruction; + +namespace llvm { + +class MCInst; +class MemoryObject; +class raw_ostream; + +namespace X86Disassembler { + +/// X86GenericDisassembler - Generic disassembler for all X86 platforms. +/// All that each platform class should have to do is subclass the constructor, +/// and provide a different disassemblerMode value. +class X86GenericDisassembler : public MCDisassembler { +protected: + /// Constructor - Initializes the disassembler. + /// + /// @param mode - The X86 architecture mode to decode for. + X86GenericDisassembler(DisassemblerMode mode); +public: + ~X86GenericDisassembler(); + + /// getInstruction - See MCDisassembler. + bool getInstruction(MCInst &instr, + uint64_t &size, + const MemoryObject &region, + uint64_t address, + raw_ostream &vStream) const; +private: + DisassemblerMode fMode; +}; + +/// X86_16Disassembler - 16-bit X86 disassembler. +class X86_16Disassembler : public X86GenericDisassembler { +public: + X86_16Disassembler() : + X86GenericDisassembler(MODE_16BIT) { + } +}; + +/// X86_32Disassembler - 32-bit X86 disassembler. +class X86_32Disassembler : public X86GenericDisassembler { +public: + X86_32Disassembler() : + X86GenericDisassembler(MODE_32BIT) { + } +}; + +/// X86_64Disassembler - 64-bit X86 disassembler. 
+class X86_64Disassembler : public X86GenericDisassembler { +public: + X86_64Disassembler() : + X86GenericDisassembler(MODE_64BIT) { + } +}; + +} // namespace X86Disassembler + +} // namespace llvm + +#endif diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c new file mode 100644 index 0000000..a0a04ba --- /dev/null +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c @@ -0,0 +1,1365 @@ +/*===- X86DisassemblerDecoder.c - Disassembler decoder -------------*- C -*-==* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * This file is part of the X86 Disassembler. + * It contains the implementation of the instruction decoder. + * Documentation for the disassembler can be found in X86Disassembler.h. + * + *===----------------------------------------------------------------------===*/ + +#include <assert.h> /* for assert() */ +#include <stdarg.h> /* for va_*() */ +#include <stdio.h> /* for vsnprintf() */ +#include <stdlib.h> /* for exit() */ +#include <string.h> /* for memset() */ + +#include "X86DisassemblerDecoder.h" + +#include "X86GenDisassemblerTables.inc" + +#define TRUE 1 +#define FALSE 0 + +#ifdef __GNUC__ +#define NORETURN __attribute__((noreturn)) +#else +#define NORETURN +#endif + +#define unreachable(s) \ + do { \ + fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, s); \ + exit(-1); \ + } while (0); + +/* + * contextForAttrs - Client for the instruction context table. Takes a set of + * attributes and returns the appropriate decode context. + * + * @param attrMask - Attributes, from the enumeration attributeBits. + * @return - The InstructionContext to use when looking up an + * an instruction with these attributes. + */ +static InstructionContext contextForAttrs(uint8_t attrMask) { + return CONTEXTS_SYM[attrMask]; +} + +/* + * modRMRequired - Reads the appropriate instruction table to determine whether + * the ModR/M byte is required to decode a particular instruction. + * + * @param type - The opcode type (i.e., how many bytes it has). + * @param insnContext - The context for the instruction, as returned by + * contextForAttrs. + * @param opcode - The last byte of the instruction's opcode, not counting + * ModR/M extensions and escapes. + * @return - TRUE if the ModR/M byte is required, FALSE otherwise. + */ +static int modRMRequired(OpcodeType type, + InstructionContext insnContext, + uint8_t opcode) { + const struct ContextDecision* decision = 0; + + switch (type) { + case ONEBYTE: + decision = &ONEBYTE_SYM; + break; + case TWOBYTE: + decision = &TWOBYTE_SYM; + break; + case THREEBYTE_38: + decision = &THREEBYTE38_SYM; + break; + case THREEBYTE_3A: + decision = &THREEBYTE3A_SYM; + break; + } + + return decision->opcodeDecisions[insnContext].modRMDecisions[opcode]. + modrm_type != MODRM_ONEENTRY; + + unreachable("Unknown opcode type"); + return 0; +} + +/* + * decode - Reads the appropriate instruction table to obtain the unique ID of + * an instruction. + * + * @param type - See modRMRequired(). + * @param insnContext - See modRMRequired(). + * @param opcode - See modRMRequired(). + * @param modRM - The ModR/M byte if required, or any value if not. 
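+ *
+ * As an illustration (hand-written example; the real tables are generated
+ * by TableGen and these entries are made up), a MODRM_SPLITRM decision for
+ * an opcode with distinct register and memory forms might look like:
+ *
+ *   static InstrUID splitIDs[2] = { 100, 101 };
+ *   static struct ModRMDecision dec = { MODRM_SPLITRM, splitIDs };
+ *
+ * where entry 0 is chosen for memory forms and entry 1 when Mod == 0b11.
+ * MODRM_ONEENTRY stores a single ID, and MODRM_FULL stores 256 IDs indexed
+ * by the whole ModR/M byte.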
+ */ +static InstrUID decode(OpcodeType type, + InstructionContext insnContext, + uint8_t opcode, + uint8_t modRM) { + struct ModRMDecision* dec; + + switch (type) { + default: + unreachable("Unknown opcode type"); + case ONEBYTE: + dec = &ONEBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; + break; + case TWOBYTE: + dec = &TWOBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; + break; + case THREEBYTE_38: + dec = &THREEBYTE38_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; + break; + case THREEBYTE_3A: + dec = &THREEBYTE3A_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; + break; + } + + switch (dec->modrm_type) { + default: + unreachable("Corrupt table! Unknown modrm_type"); + case MODRM_ONEENTRY: + return dec->instructionIDs[0]; + case MODRM_SPLITRM: + if (modFromModRM(modRM) == 0x3) + return dec->instructionIDs[1]; + else + return dec->instructionIDs[0]; + case MODRM_FULL: + return dec->instructionIDs[modRM]; + } + + return 0; +} + +/* + * specifierForUID - Given a UID, returns the name and operand specification for + * that instruction. + * + * @param uid - The unique ID for the instruction. This should be returned by + * decode(); specifierForUID will not check bounds. + * @return - A pointer to the specification for that instruction. + */ +static struct InstructionSpecifier* specifierForUID(InstrUID uid) { + return &INSTRUCTIONS_SYM[uid]; +} + +/* + * consumeByte - Uses the reader function provided by the user to consume one + * byte from the instruction's memory and advance the cursor. + * + * @param insn - The instruction with the reader function to use. The cursor + * for this instruction is advanced. + * @param byte - A pointer to a pre-allocated memory buffer to be populated + * with the data read. + * @return - 0 if the read was successful; nonzero otherwise. + */ +static int consumeByte(struct InternalInstruction* insn, uint8_t* byte) { + int ret = insn->reader(insn->readerArg, byte, insn->readerCursor); + + if (!ret) + ++(insn->readerCursor); + + return ret; +} + +/* + * lookAtByte - Like consumeByte, but does not advance the cursor. + * + * @param insn - See consumeByte(). + * @param byte - See consumeByte(). + * @return - See consumeByte(). + */ +static int lookAtByte(struct InternalInstruction* insn, uint8_t* byte) { + return insn->reader(insn->readerArg, byte, insn->readerCursor); +} + +static void unconsumeByte(struct InternalInstruction* insn) { + insn->readerCursor--; +} + +#define CONSUME_FUNC(name, type) \ + static int name(struct InternalInstruction* insn, type* ptr) { \ + type combined = 0; \ + unsigned offset; \ + for (offset = 0; offset < sizeof(type); ++offset) { \ + uint8_t byte; \ + int ret = insn->reader(insn->readerArg, \ + &byte, \ + insn->readerCursor + offset); \ + if (ret) \ + return ret; \ + combined = combined | ((type)byte << ((type)offset * 8)); \ + } \ + *ptr = combined; \ + insn->readerCursor += sizeof(type); \ + return 0; \ + } + +/* + * consume* - Use the reader function provided by the user to consume data + * values of various sizes from the instruction's memory and advance the + * cursor appropriately. These readers perform endian conversion. + * + * @param insn - See consumeByte(). + * @param ptr - A pointer to a pre-allocated memory of appropriate size to + * be populated with the data read. + * @return - See consumeByte(). 
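+ *
+ * For example (hand-expanded sketch of the macro above), consumeUInt16
+ * assembles its result least-significant byte first:
+ *
+ *   combined = (uint16_t)byte0 | ((uint16_t)byte1 << 8);
+ *
+ * so the byte sequence 0x34 0x12 yields the value 0x1234.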
+ */ +CONSUME_FUNC(consumeInt8, int8_t) +CONSUME_FUNC(consumeInt16, int16_t) +CONSUME_FUNC(consumeInt32, int32_t) +CONSUME_FUNC(consumeUInt16, uint16_t) +CONSUME_FUNC(consumeUInt32, uint32_t) +CONSUME_FUNC(consumeUInt64, uint64_t) + +/* + * dbgprintf - Uses the logging function provided by the user to log a single + * message, typically without a carriage-return. + * + * @param insn - The instruction containing the logging function. + * @param format - See printf(). + * @param ... - See printf(). + */ +static void dbgprintf(struct InternalInstruction* insn, + const char* format, + ...) { + char buffer[256]; + va_list ap; + + if (!insn->dlog) + return; + + va_start(ap, format); + (void)vsnprintf(buffer, sizeof(buffer), format, ap); + va_end(ap); + + insn->dlog(insn->dlogArg, buffer); + + return; +} + +/* + * setPrefixPresent - Marks that a particular prefix is present at a particular + * location. + * + * @param insn - The instruction to be marked as having the prefix. + * @param prefix - The prefix that is present. + * @param location - The location where the prefix is located (in the address + * space of the instruction's reader). + */ +static void setPrefixPresent(struct InternalInstruction* insn, + uint8_t prefix, + uint64_t location) +{ + insn->prefixPresent[prefix] = 1; + insn->prefixLocations[prefix] = location; +} + +/* + * isPrefixAtLocation - Queries an instruction to determine whether a prefix is + * present at a given location. + * + * @param insn - The instruction to be queried. + * @param prefix - The prefix. + * @param location - The location to query. + * @return - Whether the prefix is at that location. + */ +static BOOL isPrefixAtLocation(struct InternalInstruction* insn, + uint8_t prefix, + uint64_t location) +{ + if (insn->prefixPresent[prefix] == 1 && + insn->prefixLocations[prefix] == location) + return TRUE; + else + return FALSE; +} + +/* + * readPrefixes - Consumes all of an instruction's prefix bytes, and marks the + * instruction as having them. Also sets the instruction's default operand, + * address, and other relevant data sizes to report operands correctly. + * + * @param insn - The instruction whose prefixes are to be read. + * @return - 0 if the instruction could be read until the end of the prefix + * bytes, and no prefixes conflicted; nonzero otherwise. 
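+ *
+ * A worked example (illustrative bytes): given the sequence
+ *
+ *   66 f2 0f 58 c1
+ *
+ * this routine consumes 0x66 (Group 3, operand-size override) and 0xf2
+ * (Group 1, REPNE), records their locations, and stops at 0x0f, which is
+ * not a prefix; the escape byte is left for readOpcode() to consume.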
+ */ +static int readPrefixes(struct InternalInstruction* insn) { + BOOL isPrefix = TRUE; + BOOL prefixGroups[4] = { FALSE }; + uint64_t prefixLocation; + uint8_t byte; + + BOOL hasAdSize = FALSE; + BOOL hasOpSize = FALSE; + + dbgprintf(insn, "readPrefixes()"); + + while (isPrefix) { + prefixLocation = insn->readerCursor; + + if (consumeByte(insn, &byte)) + return -1; + + switch (byte) { + case 0xf0: /* LOCK */ + case 0xf2: /* REPNE/REPNZ */ + case 0xf3: /* REP or REPE/REPZ */ + if (prefixGroups[0]) + dbgprintf(insn, "Redundant Group 1 prefix"); + prefixGroups[0] = TRUE; + setPrefixPresent(insn, byte, prefixLocation); + break; + case 0x2e: /* CS segment override -OR- Branch not taken */ + case 0x36: /* SS segment override -OR- Branch taken */ + case 0x3e: /* DS segment override */ + case 0x26: /* ES segment override */ + case 0x64: /* FS segment override */ + case 0x65: /* GS segment override */ + switch (byte) { + case 0x2e: + insn->segmentOverride = SEG_OVERRIDE_CS; + break; + case 0x36: + insn->segmentOverride = SEG_OVERRIDE_SS; + break; + case 0x3e: + insn->segmentOverride = SEG_OVERRIDE_DS; + break; + case 0x26: + insn->segmentOverride = SEG_OVERRIDE_ES; + break; + case 0x64: + insn->segmentOverride = SEG_OVERRIDE_FS; + break; + case 0x65: + insn->segmentOverride = SEG_OVERRIDE_GS; + break; + default: + unreachable("Unhandled override"); + } + if (prefixGroups[1]) + dbgprintf(insn, "Redundant Group 2 prefix"); + prefixGroups[1] = TRUE; + setPrefixPresent(insn, byte, prefixLocation); + break; + case 0x66: /* Operand-size override */ + if (prefixGroups[2]) + dbgprintf(insn, "Redundant Group 3 prefix"); + prefixGroups[2] = TRUE; + hasOpSize = TRUE; + setPrefixPresent(insn, byte, prefixLocation); + break; + case 0x67: /* Address-size override */ + if (prefixGroups[3]) + dbgprintf(insn, "Redundant Group 4 prefix"); + prefixGroups[3] = TRUE; + hasAdSize = TRUE; + setPrefixPresent(insn, byte, prefixLocation); + break; + default: /* Not a prefix byte */ + isPrefix = FALSE; + break; + } + + if (isPrefix) + dbgprintf(insn, "Found prefix 0x%hhx", byte); + } + + if (insn->mode == MODE_64BIT) { + if ((byte & 0xf0) == 0x40) { + uint8_t opcodeByte; + + if(lookAtByte(insn, &opcodeByte) || ((opcodeByte & 0xf0) == 0x40)) { + dbgprintf(insn, "Redundant REX prefix"); + return -1; + } + + insn->rexPrefix = byte; + insn->necessaryPrefixLocation = insn->readerCursor - 2; + + dbgprintf(insn, "Found REX prefix 0x%hhx", byte); + } else { + unconsumeByte(insn); + insn->necessaryPrefixLocation = insn->readerCursor - 1; + } + } else { + unconsumeByte(insn); + } + + if (insn->mode == MODE_16BIT) { + insn->registerSize = (hasOpSize ? 4 : 2); + insn->addressSize = (hasAdSize ? 4 : 2); + insn->displacementSize = (hasAdSize ? 4 : 2); + insn->immediateSize = (hasOpSize ? 4 : 2); + } else if (insn->mode == MODE_32BIT) { + insn->registerSize = (hasOpSize ? 2 : 4); + insn->addressSize = (hasAdSize ? 2 : 4); + insn->displacementSize = (hasAdSize ? 2 : 4); + insn->immediateSize = (hasAdSize ? 2 : 4); + } else if (insn->mode == MODE_64BIT) { + if (insn->rexPrefix && wFromREX(insn->rexPrefix)) { + insn->registerSize = 8; + insn->addressSize = (hasAdSize ? 4 : 8); + insn->displacementSize = 4; + insn->immediateSize = 4; + } else if (insn->rexPrefix) { + insn->registerSize = (hasOpSize ? 2 : 4); + insn->addressSize = (hasAdSize ? 4 : 8); + insn->displacementSize = (hasOpSize ? 2 : 4); + insn->immediateSize = (hasOpSize ? 2 : 4); + } else { + insn->registerSize = (hasOpSize ? 2 : 4); + insn->addressSize = (hasAdSize ? 
4 : 8); + insn->displacementSize = (hasOpSize ? 2 : 4); + insn->immediateSize = (hasOpSize ? 2 : 4); + } + } + + return 0; +} + +/* + * readOpcode - Reads the opcode (excepting the ModR/M byte in the case of + * extended or escape opcodes). + * + * @param insn - The instruction whose opcode is to be read. + * @return - 0 if the opcode could be read successfully; nonzero otherwise. + */ +static int readOpcode(struct InternalInstruction* insn) { + /* Determine the length of the primary opcode */ + + uint8_t current; + + dbgprintf(insn, "readOpcode()"); + + insn->opcodeType = ONEBYTE; + if (consumeByte(insn, ¤t)) + return -1; + + if (current == 0x0f) { + dbgprintf(insn, "Found a two-byte escape prefix (0x%hhx)", current); + + insn->twoByteEscape = current; + + if (consumeByte(insn, ¤t)) + return -1; + + if (current == 0x38) { + dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current); + + insn->threeByteEscape = current; + + if (consumeByte(insn, ¤t)) + return -1; + + insn->opcodeType = THREEBYTE_38; + } else if (current == 0x3a) { + dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current); + + insn->threeByteEscape = current; + + if (consumeByte(insn, ¤t)) + return -1; + + insn->opcodeType = THREEBYTE_3A; + } else { + dbgprintf(insn, "Didn't find a three-byte escape prefix"); + + insn->opcodeType = TWOBYTE; + } + } + + /* + * At this point we have consumed the full opcode. + * Anything we consume from here on must be unconsumed. + */ + + insn->opcode = current; + + return 0; +} + +static int readModRM(struct InternalInstruction* insn); + +/* + * getIDWithAttrMask - Determines the ID of an instruction, consuming + * the ModR/M byte as appropriate for extended and escape opcodes, + * and using a supplied attribute mask. + * + * @param instructionID - A pointer whose target is filled in with the ID of the + * instruction. + * @param insn - The instruction whose ID is to be determined. + * @param attrMask - The attribute mask to search. + * @return - 0 if the ModR/M could be read when needed or was not + * needed; nonzero otherwise. + */ +static int getIDWithAttrMask(uint16_t* instructionID, + struct InternalInstruction* insn, + uint8_t attrMask) { + BOOL hasModRMExtension; + + uint8_t instructionClass; + + instructionClass = contextForAttrs(attrMask); + + hasModRMExtension = modRMRequired(insn->opcodeType, + instructionClass, + insn->opcode); + + if (hasModRMExtension) { + readModRM(insn); + + *instructionID = decode(insn->opcodeType, + instructionClass, + insn->opcode, + insn->modRM); + } else { + *instructionID = decode(insn->opcodeType, + instructionClass, + insn->opcode, + 0); + } + + return 0; +} + +/* + * is16BitEquivalent - Determines whether two instruction names refer to + * equivalent instructions but one is 16-bit whereas the other is not. 
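+ *
+ * For example (illustrative name pair), "ADD16ri" is the 16-bit equivalent
+ * of "ADD32ri": the mismatched character pairs '3'/'1' and '2'/'6' are
+ * exactly the substitutions the loop below accepts.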
+ * + * @param orig - The instruction that is not 16-bit + * @param equiv - The instruction that is 16-bit + */ +static BOOL is16BitEquivalent(const char* orig, const char* equiv) { + off_t i; + + for(i = 0;; i++) { + if(orig[i] == '\0' && equiv[i] == '\0') + return TRUE; + if(orig[i] == '\0' || equiv[i] == '\0') + return FALSE; + if(orig[i] != equiv[i]) { + if((orig[i] == 'Q' || orig[i] == 'L') && equiv[i] == 'W') + continue; + if((orig[i] == '6' || orig[i] == '3') && equiv[i] == '1') + continue; + if((orig[i] == '4' || orig[i] == '2') && equiv[i] == '6') + continue; + return FALSE; + } + } +} + +/* + * is64BitEquivalent - Determines whether two instruction names refer to + * equivalent instructions but one is 64-bit whereas the other is not. + * + * @param orig - The instruction that is not 64-bit + * @param equiv - The instruction that is 64-bit + */ +static BOOL is64BitEquivalent(const char* orig, const char* equiv) { + off_t i; + + for(i = 0;; i++) { + if(orig[i] == '\0' && equiv[i] == '\0') + return TRUE; + if(orig[i] == '\0' || equiv[i] == '\0') + return FALSE; + if(orig[i] != equiv[i]) { + if((orig[i] == 'W' || orig[i] == 'L') && equiv[i] == 'Q') + continue; + if((orig[i] == '1' || orig[i] == '3') && equiv[i] == '6') + continue; + if((orig[i] == '6' || orig[i] == '2') && equiv[i] == '4') + continue; + return FALSE; + } + } +} + + +/* + * getID - Determines the ID of an instruction, consuming the ModR/M byte as + * appropriate for extended and escape opcodes. Determines the attributes and + * context for the instruction before doing so. + * + * @param insn - The instruction whose ID is to be determined. + * @return - 0 if the ModR/M could be read when needed or was not needed; + * nonzero otherwise. + */ +static int getID(struct InternalInstruction* insn) { + uint8_t attrMask; + uint16_t instructionID; + + dbgprintf(insn, "getID()"); + + attrMask = ATTR_NONE; + + if (insn->mode == MODE_64BIT) + attrMask |= ATTR_64BIT; + + if (insn->rexPrefix & 0x08) + attrMask |= ATTR_REXW; + + if (isPrefixAtLocation(insn, 0x66, insn->necessaryPrefixLocation)) + attrMask |= ATTR_OPSIZE; + else if (isPrefixAtLocation(insn, 0xf3, insn->necessaryPrefixLocation)) + attrMask |= ATTR_XS; + else if (isPrefixAtLocation(insn, 0xf2, insn->necessaryPrefixLocation)) + attrMask |= ATTR_XD; + + if(getIDWithAttrMask(&instructionID, insn, attrMask)) + return -1; + + /* The following clauses compensate for limitations of the tables. */ + + if ((attrMask & ATTR_XD) && (attrMask & ATTR_REXW)) { + /* + * Although for SSE instructions it is usually necessary to treat REX.W+F2 + * as F2 for decode (in the absence of a 64BIT_REXW_XD category) there is + * an occasional instruction where F2 is incidental and REX.W is the more + * significant. If the decoded instruction is 32-bit and adding REX.W + * instead of F2 changes a 32 to a 64, we adopt the new encoding. + */ + + struct InstructionSpecifier* spec; + uint16_t instructionIDWithREXw; + struct InstructionSpecifier* specWithREXw; + + spec = specifierForUID(instructionID); + + if (getIDWithAttrMask(&instructionIDWithREXw, + insn, + attrMask & (~ATTR_XD))) { + /* + * Decoding with REX.w would yield nothing; give up and return original + * decode. 
+ */ + + insn->instructionID = instructionID; + insn->spec = spec; + return 0; + } + + specWithREXw = specifierForUID(instructionIDWithREXw); + + if (is64BitEquivalent(spec->name, specWithREXw->name)) { + insn->instructionID = instructionIDWithREXw; + insn->spec = specWithREXw; + } else { + insn->instructionID = instructionID; + insn->spec = spec; + } + return 0; + } + + if (insn->prefixPresent[0x66] && !(attrMask & ATTR_OPSIZE)) { + /* + * The instruction tables make no distinction between instructions that + * allow OpSize anywhere (i.e., 16-bit operations) and that need it in a + * particular spot (i.e., many MMX operations). In general we're + * conservative, but in the specific case where OpSize is present but not + * in the right place we check if there's a 16-bit operation. + */ + + struct InstructionSpecifier* spec; + uint16_t instructionIDWithOpsize; + struct InstructionSpecifier* specWithOpsize; + + spec = specifierForUID(instructionID); + + if (getIDWithAttrMask(&instructionIDWithOpsize, + insn, + attrMask | ATTR_OPSIZE)) { + /* + * ModRM required with OpSize but not present; give up and return version + * without OpSize set + */ + + insn->instructionID = instructionID; + insn->spec = spec; + return 0; + } + + specWithOpsize = specifierForUID(instructionIDWithOpsize); + + if (is16BitEquivalent(spec->name, specWithOpsize->name)) { + insn->instructionID = instructionIDWithOpsize; + insn->spec = specWithOpsize; + } else { + insn->instructionID = instructionID; + insn->spec = spec; + } + return 0; + } + + insn->instructionID = instructionID; + insn->spec = specifierForUID(insn->instructionID); + + return 0; +} + +/* + * readSIB - Consumes the SIB byte to determine addressing information for an + * instruction. + * + * @param insn - The instruction whose SIB byte is to be read. + * @return - 0 if the SIB byte was successfully read; nonzero otherwise. + */ +static int readSIB(struct InternalInstruction* insn) { + SIBIndex sibIndexBase = 0; + SIBBase sibBaseBase = 0; + uint8_t index, base; + + dbgprintf(insn, "readSIB()"); + + if (insn->consumedSIB) + return 0; + + insn->consumedSIB = TRUE; + + switch (insn->addressSize) { + case 2: + dbgprintf(insn, "SIB-based addressing doesn't work in 16-bit mode"); + return -1; + break; + case 4: + sibIndexBase = SIB_INDEX_EAX; + sibBaseBase = SIB_BASE_EAX; + break; + case 8: + sibIndexBase = SIB_INDEX_RAX; + sibBaseBase = SIB_BASE_RAX; + break; + } + + if (consumeByte(insn, &insn->sib)) + return -1; + + index = indexFromSIB(insn->sib) | (xFromREX(insn->rexPrefix) << 3); + + switch (index) { + case 0x4: + insn->sibIndex = SIB_INDEX_NONE; + break; + default: + insn->sibIndex = (EABase)(sibIndexBase + index); + if (insn->sibIndex == SIB_INDEX_sib || + insn->sibIndex == SIB_INDEX_sib64) + insn->sibIndex = SIB_INDEX_NONE; + break; + } + + switch (scaleFromSIB(insn->sib)) { + case 0: + insn->sibScale = 1; + break; + case 1: + insn->sibScale = 2; + break; + case 2: + insn->sibScale = 4; + break; + case 3: + insn->sibScale = 8; + break; + } + + base = baseFromSIB(insn->sib) | (bFromREX(insn->rexPrefix) << 3); + + switch (base) { + case 0x5: + switch (modFromModRM(insn->modRM)) { + case 0x0: + insn->eaDisplacement = EA_DISP_32; + insn->sibBase = SIB_BASE_NONE; + break; + case 0x1: + insn->eaDisplacement = EA_DISP_8; + insn->sibBase = (insn->addressSize == 4 ? + SIB_BASE_EBP : SIB_BASE_RBP); + break; + case 0x2: + insn->eaDisplacement = EA_DISP_32; + insn->sibBase = (insn->addressSize == 4 ? 
+ SIB_BASE_EBP : SIB_BASE_RBP); + break; + case 0x3: + unreachable("Cannot have Mod = 0b11 and a SIB byte"); + } + break; + default: + insn->sibBase = (EABase)(sibBaseBase + base); + break; + } + + return 0; +} + +/* + * readDisplacement - Consumes the displacement of an instruction. + * + * @param insn - The instruction whose displacement is to be read. + * @return - 0 if the displacement byte was successfully read; nonzero + * otherwise. + */ +static int readDisplacement(struct InternalInstruction* insn) { + int8_t d8; + int16_t d16; + int32_t d32; + + dbgprintf(insn, "readDisplacement()"); + + if (insn->consumedDisplacement) + return 0; + + insn->consumedDisplacement = TRUE; + + switch (insn->eaDisplacement) { + case EA_DISP_NONE: + insn->consumedDisplacement = FALSE; + break; + case EA_DISP_8: + if (consumeInt8(insn, &d8)) + return -1; + insn->displacement = d8; + break; + case EA_DISP_16: + if (consumeInt16(insn, &d16)) + return -1; + insn->displacement = d16; + break; + case EA_DISP_32: + if (consumeInt32(insn, &d32)) + return -1; + insn->displacement = d32; + break; + } + + insn->consumedDisplacement = TRUE; + return 0; +} + +/* + * readModRM - Consumes all addressing information (ModR/M byte, SIB byte, and + * displacement) for an instruction and interprets it. + * + * @param insn - The instruction whose addressing information is to be read. + * @return - 0 if the information was successfully read; nonzero otherwise. + */ +static int readModRM(struct InternalInstruction* insn) { + uint8_t mod, rm, reg; + + dbgprintf(insn, "readModRM()"); + + if (insn->consumedModRM) + return 0; + + consumeByte(insn, &insn->modRM); + insn->consumedModRM = TRUE; + + mod = modFromModRM(insn->modRM); + rm = rmFromModRM(insn->modRM); + reg = regFromModRM(insn->modRM); + + /* + * This goes by insn->registerSize to pick the correct register, which messes + * up if we're using (say) XMM or 8-bit register operands. That gets fixed in + * fixupReg(). + */ + switch (insn->registerSize) { + case 2: + insn->regBase = MODRM_REG_AX; + insn->eaRegBase = EA_REG_AX; + break; + case 4: + insn->regBase = MODRM_REG_EAX; + insn->eaRegBase = EA_REG_EAX; + break; + case 8: + insn->regBase = MODRM_REG_RAX; + insn->eaRegBase = EA_REG_RAX; + break; + } + + reg |= rFromREX(insn->rexPrefix) << 3; + rm |= bFromREX(insn->rexPrefix) << 3; + + insn->reg = (Reg)(insn->regBase + reg); + + switch (insn->addressSize) { + case 2: + insn->eaBaseBase = EA_BASE_BX_SI; + + switch (mod) { + case 0x0: + if (rm == 0x6) { + insn->eaBase = EA_BASE_NONE; + insn->eaDisplacement = EA_DISP_16; + if(readDisplacement(insn)) + return -1; + } else { + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + insn->eaDisplacement = EA_DISP_NONE; + } + break; + case 0x1: + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + insn->eaDisplacement = EA_DISP_8; + if(readDisplacement(insn)) + return -1; + break; + case 0x2: + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + insn->eaDisplacement = EA_DISP_16; + if(readDisplacement(insn)) + return -1; + break; + case 0x3: + insn->eaBase = (EABase)(insn->eaRegBase + rm); + if(readDisplacement(insn)) + return -1; + break; + } + break; + case 4: + case 8: + insn->eaBaseBase = (insn->addressSize == 4 ? EA_BASE_EAX : EA_BASE_RAX); + + switch (mod) { + case 0x0: + insn->eaDisplacement = EA_DISP_NONE; /* readSIB may override this */ + switch (rm) { + case 0x4: + case 0xc: /* in case REXW.b is set */ + insn->eaBase = (insn->addressSize == 4 ? 
+ EA_BASE_sib : EA_BASE_sib64); + readSIB(insn); + if(readDisplacement(insn)) + return -1; + break; + case 0x5: + insn->eaBase = EA_BASE_NONE; + insn->eaDisplacement = EA_DISP_32; + if(readDisplacement(insn)) + return -1; + break; + default: + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + break; + } + break; + case 0x1: + case 0x2: + insn->eaDisplacement = (mod == 0x1 ? EA_DISP_8 : EA_DISP_32); + switch (rm) { + case 0x4: + case 0xc: /* in case REXW.b is set */ + insn->eaBase = EA_BASE_sib; + readSIB(insn); + if(readDisplacement(insn)) + return -1; + break; + default: + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + if(readDisplacement(insn)) + return -1; + break; + } + break; + case 0x3: + insn->eaDisplacement = EA_DISP_NONE; + insn->eaBase = (EABase)(insn->eaRegBase + rm); + break; + } + break; + } /* switch (insn->addressSize) */ + + return 0; +} + +#define GENERIC_FIXUP_FUNC(name, base, prefix) \ + static uint8_t name(struct InternalInstruction *insn, \ + OperandType type, \ + uint8_t index, \ + uint8_t *valid) { \ + *valid = 1; \ + switch (type) { \ + default: \ + unreachable("Unhandled register type"); \ + case TYPE_Rv: \ + return base + index; \ + case TYPE_R8: \ + if(insn->rexPrefix && \ + index >= 4 && index <= 7) { \ + return prefix##_SPL + (index - 4); \ + } else { \ + return prefix##_AL + index; \ + } \ + case TYPE_R16: \ + return prefix##_AX + index; \ + case TYPE_R32: \ + return prefix##_EAX + index; \ + case TYPE_R64: \ + return prefix##_RAX + index; \ + case TYPE_XMM128: \ + case TYPE_XMM64: \ + case TYPE_XMM32: \ + case TYPE_XMM: \ + return prefix##_XMM0 + index; \ + case TYPE_MM64: \ + case TYPE_MM32: \ + case TYPE_MM: \ + if(index > 7) \ + *valid = 0; \ + return prefix##_MM0 + index; \ + case TYPE_SEGMENTREG: \ + if(index > 5) \ + *valid = 0; \ + return prefix##_ES + index; \ + case TYPE_DEBUGREG: \ + if(index > 7) \ + *valid = 0; \ + return prefix##_DR0 + index; \ + case TYPE_CR32: \ + if(index > 7) \ + *valid = 0; \ + return prefix##_ECR0 + index; \ + case TYPE_CR64: \ + if(index > 8) \ + *valid = 0; \ + return prefix##_RCR0 + index; \ + } \ + } + +/* + * fixup*Value - Consults an operand type to determine the meaning of the + * reg or R/M field. If the operand is an XMM operand, for example, an + * operand would be XMM0 instead of AX, which readModRM() would otherwise + * misinterpret it as. + * + * @param insn - The instruction containing the operand. + * @param type - The operand type. + * @param index - The existing value of the field as reported by readModRM(). + * @param valid - The address of a uint8_t. The target is set to 1 if the + * field is valid for the register class; 0 if not. + */ +GENERIC_FIXUP_FUNC(fixupRegValue, insn->regBase, MODRM_REG) +GENERIC_FIXUP_FUNC(fixupRMValue, insn->eaRegBase, EA_REG) + +/* + * fixupReg - Consults an operand specifier to determine which of the + * fixup*Value functions to use in correcting readModRM()'ss interpretation. + * + * @param insn - See fixup*Value(). + * @param op - The operand specifier. + * @return - 0 if fixup was successful; -1 if the register returned was + * invalid for its class. 
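+ *
+ * Example (illustrative): for the bytes 0f 6e c8 (movd %eax, %mm1),
+ * readModRM() initially reports the reg field as MODRM_REG_ECX, since it
+ * only knows the register size; because the operand's type is TYPE_MM,
+ * fixupRegValue() rewrites it to MODRM_REG_MM1.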
+ */ +static int fixupReg(struct InternalInstruction *insn, + struct OperandSpecifier *op) { + uint8_t valid; + + dbgprintf(insn, "fixupReg()"); + + switch ((OperandEncoding)op->encoding) { + default: + unreachable("Expected a REG or R/M encoding in fixupReg"); + case ENCODING_REG: + insn->reg = (Reg)fixupRegValue(insn, + (OperandType)op->type, + insn->reg - insn->regBase, + &valid); + if (!valid) + return -1; + break; + case ENCODING_RM: + if (insn->eaBase >= insn->eaRegBase) { + insn->eaBase = (EABase)fixupRMValue(insn, + (OperandType)op->type, + insn->eaBase - insn->eaRegBase, + &valid); + if (!valid) + return -1; + } + break; + } + + return 0; +} + +/* + * readOpcodeModifier - Reads an operand from the opcode field of an + * instruction. Handles AddRegFrm instructions. + * + * @param insn - The instruction whose opcode field is to be read. + * @param inModRM - Indicates that the opcode field is to be read from the + * ModR/M extension; useful for escape opcodes + */ +static void readOpcodeModifier(struct InternalInstruction* insn) { + dbgprintf(insn, "readOpcodeModifier()"); + + if (insn->consumedOpcodeModifier) + return; + + insn->consumedOpcodeModifier = TRUE; + + switch(insn->spec->modifierType) { + default: + unreachable("Unknown modifier type."); + case MODIFIER_NONE: + unreachable("No modifier but an operand expects one."); + case MODIFIER_OPCODE: + insn->opcodeModifier = insn->opcode - insn->spec->modifierBase; + break; + case MODIFIER_MODRM: + insn->opcodeModifier = insn->modRM - insn->spec->modifierBase; + break; + } +} + +/* + * readOpcodeRegister - Reads an operand from the opcode field of an + * instruction and interprets it appropriately given the operand width. + * Handles AddRegFrm instructions. + * + * @param insn - See readOpcodeModifier(). + * @param size - The width (in bytes) of the register being specified. + * 1 means AL and friends, 2 means AX, 4 means EAX, and 8 means + * RAX. + */ +static void readOpcodeRegister(struct InternalInstruction* insn, uint8_t size) { + dbgprintf(insn, "readOpcodeRegister()"); + + readOpcodeModifier(insn); + + if (size == 0) + size = insn->registerSize; + + switch (size) { + case 1: + insn->opcodeRegister = (Reg)(MODRM_REG_AL + ((bFromREX(insn->rexPrefix) << 3) + | insn->opcodeModifier)); + if(insn->rexPrefix && + insn->opcodeRegister >= MODRM_REG_AL + 0x4 && + insn->opcodeRegister < MODRM_REG_AL + 0x8) { + insn->opcodeRegister = (Reg)(MODRM_REG_SPL + + (insn->opcodeRegister - MODRM_REG_AL - 4)); + } + + break; + case 2: + insn->opcodeRegister = (Reg)(MODRM_REG_AX + + ((bFromREX(insn->rexPrefix) << 3) + | insn->opcodeModifier)); + break; + case 4: + insn->opcodeRegister = (Reg)(MODRM_REG_EAX + + + ((bFromREX(insn->rexPrefix) << 3) + | insn->opcodeModifier)); + break; + case 8: + insn->opcodeRegister = (Reg)(MODRM_REG_RAX + + ((bFromREX(insn->rexPrefix) << 3) + | insn->opcodeModifier)); + break; + } +} + +/* + * readImmediate - Consumes an immediate operand from an instruction, given the + * desired operand size. + * + * @param insn - The instruction whose operand is to be read. + * @param size - The width (in bytes) of the operand. + * @return - 0 if the immediate was successfully consumed; nonzero + * otherwise. 
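+ *
+ * Note on the storage convention: immediates are recorded zero-extended
+ * into a uint64_t slot, so the 8-bit immediate 0xff is stored as
+ * 0x00000000000000ff; any sign interpretation is left to later consumers.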
+ */ +static int readImmediate(struct InternalInstruction* insn, uint8_t size) { + uint8_t imm8; + uint16_t imm16; + uint32_t imm32; + uint64_t imm64; + + dbgprintf(insn, "readImmediate()"); + + if (insn->numImmediatesConsumed == 2) + unreachable("Already consumed two immediates"); + + if (size == 0) + size = insn->immediateSize; + else + insn->immediateSize = size; + + switch (size) { + case 1: + if (consumeByte(insn, &imm8)) + return -1; + insn->immediates[insn->numImmediatesConsumed] = imm8; + break; + case 2: + if (consumeUInt16(insn, &imm16)) + return -1; + insn->immediates[insn->numImmediatesConsumed] = imm16; + break; + case 4: + if (consumeUInt32(insn, &imm32)) + return -1; + insn->immediates[insn->numImmediatesConsumed] = imm32; + break; + case 8: + if (consumeUInt64(insn, &imm64)) + return -1; + insn->immediates[insn->numImmediatesConsumed] = imm64; + break; + } + + insn->numImmediatesConsumed++; + + return 0; +} + +/* + * readOperands - Consults the specifier for an instruction and consumes all + * operands for that instruction, interpreting them as it goes. + * + * @param insn - The instruction whose operands are to be read and interpreted. + * @return - 0 if all operands could be read; nonzero otherwise. + */ +static int readOperands(struct InternalInstruction* insn) { + int index; + + dbgprintf(insn, "readOperands()"); + + for (index = 0; index < X86_MAX_OPERANDS; ++index) { + switch (insn->spec->operands[index].encoding) { + case ENCODING_NONE: + break; + case ENCODING_REG: + case ENCODING_RM: + if (readModRM(insn)) + return -1; + if (fixupReg(insn, &insn->spec->operands[index])) + return -1; + break; + case ENCODING_CB: + case ENCODING_CW: + case ENCODING_CD: + case ENCODING_CP: + case ENCODING_CO: + case ENCODING_CT: + dbgprintf(insn, "We currently don't handle code-offset encodings"); + return -1; + case ENCODING_IB: + if (readImmediate(insn, 1)) + return -1; + break; + case ENCODING_IW: + if (readImmediate(insn, 2)) + return -1; + break; + case ENCODING_ID: + if (readImmediate(insn, 4)) + return -1; + break; + case ENCODING_IO: + if (readImmediate(insn, 8)) + return -1; + break; + case ENCODING_Iv: + readImmediate(insn, insn->immediateSize); + break; + case ENCODING_Ia: + readImmediate(insn, insn->addressSize); + break; + case ENCODING_RB: + readOpcodeRegister(insn, 1); + break; + case ENCODING_RW: + readOpcodeRegister(insn, 2); + break; + case ENCODING_RD: + readOpcodeRegister(insn, 4); + break; + case ENCODING_RO: + readOpcodeRegister(insn, 8); + break; + case ENCODING_Rv: + readOpcodeRegister(insn, 0); + break; + case ENCODING_I: + readOpcodeModifier(insn); + break; + case ENCODING_DUP: + break; + default: + dbgprintf(insn, "Encountered an operand with an unknown encoding."); + return -1; + } + } + + return 0; +} + +/* + * decodeInstruction - Reads and interprets a full instruction provided by the + * user. + * + * @param insn - A pointer to the instruction to be populated. Must be + * pre-allocated. + * @param reader - The function to be used to read the instruction's bytes. + * @param readerArg - A generic argument to be passed to the reader to store + * any internal state. + * @param logger - If non-NULL, the function to be used to write log messages + * and warnings. + * @param loggerArg - A generic argument to be passed to the logger to store + * any internal state. + * @param startLoc - The address (in the reader's address space) of the first + * byte in the instruction. 
+ * @param mode - The mode (real mode, IA-32e, or IA-32e in 64-bit mode) to + * decode the instruction in. + * @return - 0 if the instruction's memory could be read; nonzero if + * not. + */ +int decodeInstruction(struct InternalInstruction* insn, + byteReader_t reader, + void* readerArg, + dlog_t logger, + void* loggerArg, + uint64_t startLoc, + DisassemblerMode mode) { + memset(insn, 0, sizeof(struct InternalInstruction)); + + insn->reader = reader; + insn->readerArg = readerArg; + insn->dlog = logger; + insn->dlogArg = loggerArg; + insn->startLocation = startLoc; + insn->readerCursor = startLoc; + insn->mode = mode; + insn->numImmediatesConsumed = 0; + + if (readPrefixes(insn) || + readOpcode(insn) || + getID(insn) || + insn->instructionID == 0 || + readOperands(insn)) + return -1; + + insn->length = insn->readerCursor - insn->startLocation; + + dbgprintf(insn, "Read from 0x%llx to 0x%llx: length %llu", + startLoc, insn->readerCursor, insn->length); + + if (insn->length > 15) + dbgprintf(insn, "Instruction exceeds 15-byte limit"); + + return 0; +} diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h new file mode 100644 index 0000000..c03c07a --- /dev/null +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h @@ -0,0 +1,515 @@ +/*===- X86DisassemblerDecoderInternal.h - Disassembler decoder -----*- C -*-==* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * This file is part of the X86 Disassembler. + * It contains the public interface of the instruction decoder. + * Documentation for the disassembler can be found in X86Disassembler.h. + * + *===----------------------------------------------------------------------===*/ + +#ifndef X86DISASSEMBLERDECODER_H +#define X86DISASSEMBLERDECODER_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define INSTRUCTION_SPECIFIER_FIELDS \ + const char* name; + +#define INSTRUCTION_IDS \ + InstrUID* instructionIDs; + +#include "X86DisassemblerDecoderCommon.h" + +#undef INSTRUCTION_SPECIFIER_FIELDS +#undef INSTRUCTION_IDS + +/* + * Accessor functions for various fields of an Intel instruction + */ +#define modFromModRM(modRM) ((modRM & 0xc0) >> 6) +#define regFromModRM(modRM) ((modRM & 0x38) >> 3) +#define rmFromModRM(modRM) (modRM & 0x7) +#define scaleFromSIB(sib) ((sib & 0xc0) >> 6) +#define indexFromSIB(sib) ((sib & 0x38) >> 3) +#define baseFromSIB(sib) (sib & 0x7) +#define wFromREX(rex) ((rex & 0x8) >> 3) +#define rFromREX(rex) ((rex & 0x4) >> 2) +#define xFromREX(rex) ((rex & 0x2) >> 1) +#define bFromREX(rex) (rex & 0x1) + +/* + * These enums represent Intel registers for use by the decoder. 
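To make the contract above concrete, here is a minimal consumer sketch, assuming only the interfaces declared in X86DisassemblerDecoder.h. BufferRegion and bufferReader are hypothetical helpers for decoding out of a flat byte buffer; note that immediates and displacements are consumed little-endian, as on any x86:

    #include <stdint.h>
    #include "X86DisassemblerDecoder.h"

    struct BufferRegion {
      const uint8_t *bytes;
      uint64_t size;
    };

    /* A byteReader_t over a flat buffer; returning -1 past the end makes
       decodeInstruction() fail cleanly instead of reading garbage. */
    static int bufferReader(void *arg, uint8_t *byte, uint64_t address) {
      struct BufferRegion *region = (struct BufferRegion *)arg;
      if (address >= region->size)
        return -1;
      *byte = region->bytes[address];
      return 0;
    }

    /* Usage (logger may be NULL, per the documentation below):
     *   struct InternalInstruction insn;
     *   struct BufferRegion region = { code, codeSize };
     *   if (!decodeInstruction(&insn, bufferReader, &region, NULL, NULL,
     *                          0, MODE_64BIT))
     *     printf("decoded %zu bytes\n", insn.length);
     */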
+ */ + +#define REGS_8BIT \ + ENTRY(AL) \ + ENTRY(CL) \ + ENTRY(DL) \ + ENTRY(BL) \ + ENTRY(AH) \ + ENTRY(CH) \ + ENTRY(DH) \ + ENTRY(BH) \ + ENTRY(R8B) \ + ENTRY(R9B) \ + ENTRY(R10B) \ + ENTRY(R11B) \ + ENTRY(R12B) \ + ENTRY(R13B) \ + ENTRY(R14B) \ + ENTRY(R15B) \ + ENTRY(SPL) \ + ENTRY(BPL) \ + ENTRY(SIL) \ + ENTRY(DIL) + +#define EA_BASES_16BIT \ + ENTRY(BX_SI) \ + ENTRY(BX_DI) \ + ENTRY(BP_SI) \ + ENTRY(BP_DI) \ + ENTRY(SI) \ + ENTRY(DI) \ + ENTRY(BP) \ + ENTRY(BX) \ + ENTRY(R8W) \ + ENTRY(R9W) \ + ENTRY(R10W) \ + ENTRY(R11W) \ + ENTRY(R12W) \ + ENTRY(R13W) \ + ENTRY(R14W) \ + ENTRY(R15W) + +#define REGS_16BIT \ + ENTRY(AX) \ + ENTRY(CX) \ + ENTRY(DX) \ + ENTRY(BX) \ + ENTRY(SP) \ + ENTRY(BP) \ + ENTRY(SI) \ + ENTRY(DI) \ + ENTRY(R8W) \ + ENTRY(R9W) \ + ENTRY(R10W) \ + ENTRY(R11W) \ + ENTRY(R12W) \ + ENTRY(R13W) \ + ENTRY(R14W) \ + ENTRY(R15W) + +#define EA_BASES_32BIT \ + ENTRY(EAX) \ + ENTRY(ECX) \ + ENTRY(EDX) \ + ENTRY(EBX) \ + ENTRY(sib) \ + ENTRY(EBP) \ + ENTRY(ESI) \ + ENTRY(EDI) \ + ENTRY(R8D) \ + ENTRY(R9D) \ + ENTRY(R10D) \ + ENTRY(R11D) \ + ENTRY(R12D) \ + ENTRY(R13D) \ + ENTRY(R14D) \ + ENTRY(R15D) + +#define REGS_32BIT \ + ENTRY(EAX) \ + ENTRY(ECX) \ + ENTRY(EDX) \ + ENTRY(EBX) \ + ENTRY(ESP) \ + ENTRY(EBP) \ + ENTRY(ESI) \ + ENTRY(EDI) \ + ENTRY(R8D) \ + ENTRY(R9D) \ + ENTRY(R10D) \ + ENTRY(R11D) \ + ENTRY(R12D) \ + ENTRY(R13D) \ + ENTRY(R14D) \ + ENTRY(R15D) + +#define EA_BASES_64BIT \ + ENTRY(RAX) \ + ENTRY(RCX) \ + ENTRY(RDX) \ + ENTRY(RBX) \ + ENTRY(sib64) \ + ENTRY(RBP) \ + ENTRY(RSI) \ + ENTRY(RDI) \ + ENTRY(R8) \ + ENTRY(R9) \ + ENTRY(R10) \ + ENTRY(R11) \ + ENTRY(R12) \ + ENTRY(R13) \ + ENTRY(R14) \ + ENTRY(R15) + +#define REGS_64BIT \ + ENTRY(RAX) \ + ENTRY(RCX) \ + ENTRY(RDX) \ + ENTRY(RBX) \ + ENTRY(RSP) \ + ENTRY(RBP) \ + ENTRY(RSI) \ + ENTRY(RDI) \ + ENTRY(R8) \ + ENTRY(R9) \ + ENTRY(R10) \ + ENTRY(R11) \ + ENTRY(R12) \ + ENTRY(R13) \ + ENTRY(R14) \ + ENTRY(R15) + +#define REGS_MMX \ + ENTRY(MM0) \ + ENTRY(MM1) \ + ENTRY(MM2) \ + ENTRY(MM3) \ + ENTRY(MM4) \ + ENTRY(MM5) \ + ENTRY(MM6) \ + ENTRY(MM7) + +#define REGS_XMM \ + ENTRY(XMM0) \ + ENTRY(XMM1) \ + ENTRY(XMM2) \ + ENTRY(XMM3) \ + ENTRY(XMM4) \ + ENTRY(XMM5) \ + ENTRY(XMM6) \ + ENTRY(XMM7) \ + ENTRY(XMM8) \ + ENTRY(XMM9) \ + ENTRY(XMM10) \ + ENTRY(XMM11) \ + ENTRY(XMM12) \ + ENTRY(XMM13) \ + ENTRY(XMM14) \ + ENTRY(XMM15) + +#define REGS_SEGMENT \ + ENTRY(ES) \ + ENTRY(CS) \ + ENTRY(SS) \ + ENTRY(DS) \ + ENTRY(FS) \ + ENTRY(GS) + +#define REGS_DEBUG \ + ENTRY(DR0) \ + ENTRY(DR1) \ + ENTRY(DR2) \ + ENTRY(DR3) \ + ENTRY(DR4) \ + ENTRY(DR5) \ + ENTRY(DR6) \ + ENTRY(DR7) + +#define REGS_CONTROL_32BIT \ + ENTRY(ECR0) \ + ENTRY(ECR1) \ + ENTRY(ECR2) \ + ENTRY(ECR3) \ + ENTRY(ECR4) \ + ENTRY(ECR5) \ + ENTRY(ECR6) \ + ENTRY(ECR7) + +#define REGS_CONTROL_64BIT \ + ENTRY(RCR0) \ + ENTRY(RCR1) \ + ENTRY(RCR2) \ + ENTRY(RCR3) \ + ENTRY(RCR4) \ + ENTRY(RCR5) \ + ENTRY(RCR6) \ + ENTRY(RCR7) \ + ENTRY(RCR8) + +#define ALL_EA_BASES \ + EA_BASES_16BIT \ + EA_BASES_32BIT \ + EA_BASES_64BIT + +#define ALL_SIB_BASES \ + REGS_32BIT \ + REGS_64BIT + +#define ALL_REGS \ + REGS_8BIT \ + REGS_16BIT \ + REGS_32BIT \ + REGS_64BIT \ + REGS_MMX \ + REGS_XMM \ + REGS_SEGMENT \ + REGS_DEBUG \ + REGS_CONTROL_32BIT \ + REGS_CONTROL_64BIT \ + ENTRY(RIP) + +/* + * EABase - All possible values of the base field for effective-address + * computations, a.k.a. the Mod and R/M fields of the ModR/M byte. We + * distinguish between bases (EA_BASE_*) and registers that just happen to be + * referred to when Mod == 0b11 (EA_REG_*). 
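These lists use the classic X-macro idiom: each list is expanded several times with different definitions of ENTRY(), so the enums and any parallel tables built from them cannot fall out of sync. A toy sketch of the pattern (the TOY_* names are illustrative only):

    /* One list, expanded twice: once into an enum, once into a name table. */
    #define TOY_REGS \
      ENTRY(AX)      \
      ENTRY(CX)      \
      ENTRY(DX)

    #define ENTRY(x) TOY_##x,
    enum ToyReg {
      TOY_REGS
      TOY_max
    };
    #undef ENTRY

    #define ENTRY(x) #x,
    static const char *toyRegNames[] = {
      TOY_REGS
    };
    #undef ENTRY

    /* toyRegNames[TOY_CX] == "CX", by construction. */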
+ */ +typedef enum { + EA_BASE_NONE, +#define ENTRY(x) EA_BASE_##x, + ALL_EA_BASES +#undef ENTRY +#define ENTRY(x) EA_REG_##x, + ALL_REGS +#undef ENTRY + EA_max +} EABase; + +/* + * SIBIndex - All possible values of the SIB index field. + * Borrows entries from ALL_EA_BASES with the special case that + * sib is synonymous with NONE. + */ +typedef enum { + SIB_INDEX_NONE, +#define ENTRY(x) SIB_INDEX_##x, + ALL_EA_BASES +#undef ENTRY + SIB_INDEX_max +} SIBIndex; + +/* + * SIBBase - All possible values of the SIB base field. + */ +typedef enum { + SIB_BASE_NONE, +#define ENTRY(x) SIB_BASE_##x, + ALL_SIB_BASES +#undef ENTRY + SIB_BASE_max +} SIBBase; + +/* + * EADisplacement - Possible displacement types for effective-address + * computations. + */ +typedef enum { + EA_DISP_NONE, + EA_DISP_8, + EA_DISP_16, + EA_DISP_32 +} EADisplacement; + +/* + * Reg - All possible values of the reg field in the ModR/M byte. + */ +typedef enum { +#define ENTRY(x) MODRM_REG_##x, + ALL_REGS +#undef ENTRY + MODRM_REG_max +} Reg; + +/* + * SegmentOverride - All possible segment overrides. + */ +typedef enum { + SEG_OVERRIDE_NONE, + SEG_OVERRIDE_CS, + SEG_OVERRIDE_SS, + SEG_OVERRIDE_DS, + SEG_OVERRIDE_ES, + SEG_OVERRIDE_FS, + SEG_OVERRIDE_GS, + SEG_OVERRIDE_max +} SegmentOverride; + +typedef uint8_t BOOL; + +/* + * byteReader_t - Type for the byte reader that the consumer must provide to + * the decoder. Reads a single byte from the instruction's address space. + * @param arg - A baton that the consumer can associate with any internal + * state that it needs. + * @param byte - A pointer to a single byte in memory that should be set to + * contain the value at address. + * @param address - The address in the instruction's address space that should + * be read from. + * @return - -1 if the byte cannot be read for any reason; 0 otherwise. + */ +typedef int (*byteReader_t)(void* arg, uint8_t* byte, uint64_t address); + +/* + * dlog_t - Type for the logging function that the consumer can provide to + * get debugging output from the decoder. + * @param arg - A baton that the consumer can associate with any internal + * state that it needs. + * @param log - A string that contains the message. Will be reused after + * the logger returns. + */ +typedef void (*dlog_t)(void* arg, const char *log); + +/* + * The x86 internal instruction, which is produced by the decoder. 
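A minimal dlog_t implementation might look like the sketch below, assuming nothing beyond the typedef above. Because the string is reused after the callback returns, the message is emitted immediately rather than stored:

    #include <stdio.h>

    /* Forward decoder log messages to stderr; the baton is unused. */
    static void stderrLogger(void *arg, const char *log) {
      (void)arg;
      fprintf(stderr, "x86 decoder: %s\n", log);
    }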
+ */ +struct InternalInstruction { + /* Reader interface (C) */ + byteReader_t reader; + /* Opaque value passed to the reader */ + void* readerArg; + /* The address of the next byte to read via the reader */ + uint64_t readerCursor; + + /* Logger interface (C) */ + dlog_t dlog; + /* Opaque value passed to the logger */ + void* dlogArg; + + /* General instruction information */ + + /* The mode to disassemble for (64-bit, protected, real) */ + DisassemblerMode mode; + /* The start of the instruction, usable with the reader */ + uint64_t startLocation; + /* The length of the instruction, in bytes */ + size_t length; + + /* Prefix state */ + + /* 1 if the prefix byte corresponding to the entry is present; 0 if not */ + uint8_t prefixPresent[0x100]; + /* contains the location (for use with the reader) of the prefix byte */ + uint64_t prefixLocations[0x100]; + /* The value of the REX prefix, if present */ + uint8_t rexPrefix; + /* The location of the REX prefix */ + uint64_t rexLocation; + /* The location where a mandatory prefix would have to be (i.e., right before + the opcode, or right before the REX prefix if one is present) */ + uint64_t necessaryPrefixLocation; + /* The segment override type */ + SegmentOverride segmentOverride; + + /* Sizes of various critical pieces of data */ + uint8_t registerSize; + uint8_t addressSize; + uint8_t displacementSize; + uint8_t immediateSize; + + /* opcode state */ + + /* The value of the two-byte escape prefix (usually 0x0f) */ + uint8_t twoByteEscape; + /* The value of the three-byte escape prefix (usually 0x38 or 0x3a) */ + uint8_t threeByteEscape; + /* The last byte of the opcode, not counting any ModR/M extension */ + uint8_t opcode; + /* The ModR/M byte of the instruction, if it is an opcode extension */ + uint8_t modRMExtension; + + /* decode state */ + + /* The type of opcode, used for indexing into the array of decode tables */ + OpcodeType opcodeType; + /* The instruction ID, extracted from the decode table */ + uint16_t instructionID; + /* The specifier for the instruction, from the instruction info table */ + struct InstructionSpecifier* spec; + + /* state for additional bytes, consumed during operand decode. Pattern: + consumed___ indicates that the byte was already consumed and does not + need to be consumed again */ + + /* The ModR/M byte, which contains most register operands and some portion of + all memory operands */ + BOOL consumedModRM; + uint8_t modRM; + + /* The SIB byte, used for more complex 32- or 64-bit memory operands */ + BOOL consumedSIB; + uint8_t sib; + + /* The displacement, used for memory operands */ + BOOL consumedDisplacement; + int32_t displacement; + + /* Immediates. There can be two in some cases */ + uint8_t numImmediatesConsumed; + uint8_t numImmediatesTranslated; + uint64_t immediates[2]; + + /* A register or immediate operand encoded into the opcode */ + BOOL consumedOpcodeModifier; + uint8_t opcodeModifier; + Reg opcodeRegister; + + /* Portions of the ModR/M byte */ + + /* These fields determine the allowable values for the ModR/M fields, which + depend on operand and address widths */ + EABase eaBaseBase; + EABase eaRegBase; + Reg regBase; + + /* The Mod and R/M fields can encode a base for an effective address, or a + register. 
These are separated into two fields here */ + EABase eaBase; + EADisplacement eaDisplacement; + /* The reg field always encodes a register */ + Reg reg; + + /* SIB state */ + SIBIndex sibIndex; + uint8_t sibScale; + SIBBase sibBase; +}; + +/* decodeInstruction - Decode one instruction and store the decoding results in + * a buffer provided by the consumer. + * @param insn - The buffer to store the instruction in. Allocated by the + * consumer. + * @param reader - The byteReader_t for the bytes to be read. + * @param readerArg - An argument to pass to the reader for storing context + * specific to the consumer. May be NULL. + * @param logger - The dlog_t to be used in printing status messages from the + * disassembler. May be NULL. + * @param loggerArg - An argument to pass to the logger for storing context + * specific to the logger. May be NULL. + * @param startLoc - The address (in the reader's address space) of the first + * byte in the instruction. + * @param mode - The mode (16-bit, 32-bit, 64-bit) to decode in. + * @return - Nonzero if there was an error during decode, 0 otherwise. + */ +int decodeInstruction(struct InternalInstruction* insn, + byteReader_t reader, + void* readerArg, + dlog_t logger, + void* loggerArg, + uint64_t startLoc, + DisassemblerMode mode); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h new file mode 100644 index 0000000..c213f89 --- /dev/null +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h @@ -0,0 +1,355 @@ +/*===- X86DisassemblerDecoderCommon.h - Disassembler decoder -------*- C -*-==* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * This file is part of the X86 Disassembler. + * It contains common definitions used by both the disassembler and the table + * generator. + * Documentation for the disassembler can be found in X86Disassembler.h. + * + *===----------------------------------------------------------------------===*/ + +/* + * This header file provides those definitions that need to be shared between + * the decoder and the table generator in a C-friendly manner. + */ + +#ifndef X86DISASSEMBLERDECODERCOMMON_H +#define X86DISASSEMBLERDECODERCOMMON_H + +#include "llvm/System/DataTypes.h" + +#define INSTRUCTIONS_SYM x86DisassemblerInstrSpecifiers +#define CONTEXTS_SYM x86DisassemblerContexts +#define ONEBYTE_SYM x86DisassemblerOneByteOpcodes +#define TWOBYTE_SYM x86DisassemblerTwoByteOpcodes +#define THREEBYTE38_SYM x86DisassemblerThreeByte38Opcodes +#define THREEBYTE3A_SYM x86DisassemblerThreeByte3AOpcodes + +#define INSTRUCTIONS_STR "x86DisassemblerInstrSpecifiers" +#define CONTEXTS_STR "x86DisassemblerContexts" +#define ONEBYTE_STR "x86DisassemblerOneByteOpcodes" +#define TWOBYTE_STR "x86DisassemblerTwoByteOpcodes" +#define THREEBYTE38_STR "x86DisassemblerThreeByte38Opcodes" +#define THREEBYTE3A_STR "x86DisassemblerThreeByte3AOpcodes" + +/* + * Attributes of an instruction that must be known before the opcode can be + * processed correctly. Most of these indicate the presence of particular + * prefixes, but ATTR_64BIT is simply an attribute of the decoding context. 
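In other words, the decoder reduces a raw attribute mask to one of the contexts defined below with a single table lookup. A sketch, assuming the generated CONTEXTS_SYM table maps every possible mask to the InstructionContext that subsumes it (the table's element type is an assumption here):

    #include <stdint.h>

    extern const uint8_t x86DisassemblerContexts[]; /* CONTEXTS_SYM */

    static InstructionContext contextForAttrs(uint8_t attrMask) {
      return (InstructionContext)x86DisassemblerContexts[attrMask];
    }

    /* e.g. contextForAttrs(ATTR_64BIT | ATTR_REXW) should resolve to
       IC_64BIT_REXW in the ranking below. */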
+ */ +#define ATTRIBUTE_BITS \ + ENUM_ENTRY(ATTR_NONE, 0x00) \ + ENUM_ENTRY(ATTR_64BIT, 0x01) \ + ENUM_ENTRY(ATTR_XS, 0x02) \ + ENUM_ENTRY(ATTR_XD, 0x04) \ + ENUM_ENTRY(ATTR_REXW, 0x08) \ + ENUM_ENTRY(ATTR_OPSIZE, 0x10) + +#define ENUM_ENTRY(n, v) n = v, +enum attributeBits { + ATTRIBUTE_BITS + ATTR_max +}; +#undef ENUM_ENTRY + +/* + * Combinations of the above attributes that are relevant to instruction + * decode. Although other combinations are possible, they can be reduced to + * these without affecting the ultimately decoded instruction. + */ + +/* Class name Rank Rationale for rank assignment */ +#define INSTRUCTION_CONTEXTS \ + ENUM_ENTRY(IC, 0, "says nothing about the instruction") \ + ENUM_ENTRY(IC_64BIT, 1, "says the instruction applies in " \ + "64-bit mode but no more") \ + ENUM_ENTRY(IC_OPSIZE, 3, "requires an OPSIZE prefix, so " \ + "operands change width") \ + ENUM_ENTRY(IC_XD, 2, "may say something about the opcode " \ + "but not the operands") \ + ENUM_ENTRY(IC_XS, 2, "may say something about the opcode " \ + "but not the operands") \ + ENUM_ENTRY(IC_64BIT_REXW, 4, "requires a REX.W prefix, so operands "\ + "change width; overrides IC_OPSIZE") \ + ENUM_ENTRY(IC_64BIT_OPSIZE, 3, "Just as meaningful as IC_OPSIZE") \ + ENUM_ENTRY(IC_64BIT_XD, 5, "XD instructions are SSE; REX.W is " \ + "secondary") \ + ENUM_ENTRY(IC_64BIT_XS, 5, "Just as meaningful as IC_64BIT_XD") \ + ENUM_ENTRY(IC_64BIT_REXW_XS, 6, "OPSIZE could mean a different " \ + "opcode") \ + ENUM_ENTRY(IC_64BIT_REXW_XD, 6, "Just as meaningful as " \ + "IC_64BIT_REXW_XS") \ + ENUM_ENTRY(IC_64BIT_REXW_OPSIZE, 7, "The Dynamic Duo! Prefer over all " \ + "else because this changes most " \ + "operands' meaning") + +#define ENUM_ENTRY(n, r, d) n, +typedef enum { + INSTRUCTION_CONTEXTS + IC_max +} InstructionContext; +#undef ENUM_ENTRY + +/* + * Opcode types, which determine which decode table to use, both in the Intel + * manual and also for the decoder. + */ +typedef enum { + ONEBYTE = 0, + TWOBYTE = 1, + THREEBYTE_38 = 2, + THREEBYTE_3A = 3 +} OpcodeType; + +/* + * The following structs are used for the hierarchical decode table. After + * determining the instruction's class (i.e., which IC_* constant applies to + * it), the decoder reads the opcode. Some instructions require specific + * values of the ModR/M byte, so the ModR/M byte indexes into the final table. + * + * If a ModR/M byte is not required, "required" is left unset, and the values + * for each instructionID are identical. + */ + +typedef uint16_t InstrUID; + +/* + * ModRMDecisionType - describes the type of ModR/M decision, allowing the + * consumer to determine the number of entries in it. + * + * MODRM_ONEENTRY - No matter what the value of the ModR/M byte is, the decoded + * instruction is the same. + * MODRM_SPLITRM - If the ModR/M byte is between 0x00 and 0xbf, the opcode + * corresponds to one instruction; otherwise, it corresponds to + * a different instruction. + * MODRM_FULL - Potentially, each value of the ModR/M byte could correspond + * to a different instruction. + */ + +#define MODRMTYPES \ + ENUM_ENTRY(MODRM_ONEENTRY) \ + ENUM_ENTRY(MODRM_SPLITRM) \ + ENUM_ENTRY(MODRM_FULL) + +#define ENUM_ENTRY(n) n, +typedef enum { + MODRMTYPES + MODRM_max +} ModRMDecisionType; +#undef ENUM_ENTRY + +/* + * ModRMDecision - Specifies whether a ModR/M byte is needed and (if so) which + * instruction each possible value of the ModR/M byte corresponds to. Once + * this information is known, we have narrowed down to a single instruction. 
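Consulting a ModRMDecision then amounts to picking an index by its type. A sketch, using the struct defined immediately below; the instructionIDs layout (one, two, or 256 entries) is inferred from these comments rather than from the generated tables:

    /* Map a ModR/M byte to an instruction ID under one ModRMDecision. */
    static InstrUID uidForModRM(const struct ModRMDecision *dec,
                                uint8_t modRM) {
      switch (dec->modrm_type) {
      case MODRM_ONEENTRY:
        return dec->instructionIDs[0];          /* ModR/M is irrelevant */
      case MODRM_SPLITRM:
        return dec->instructionIDs[modRM < 0xc0 ? 0 : 1];
      case MODRM_FULL:
        return dec->instructionIDs[modRM];      /* one entry per byte value */
      default:
        return 0;                               /* invalid table */
      }
    }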
+ */ +struct ModRMDecision { + uint8_t modrm_type; + + /* The macro below must be defined wherever this file is included. */ + INSTRUCTION_IDS +}; + +/* + * OpcodeDecision - Specifies which set of ModR/M->instruction tables to look at + * given a particular opcode. + */ +struct OpcodeDecision { + struct ModRMDecision modRMDecisions[256]; +}; + +/* + * ContextDecision - Specifies which opcode->instruction tables to look at given + * a particular context (set of attributes). Since there are many possible + * contexts, the decoder first uses CONTEXTS_SYM to determine which context + * applies given a specific set of attributes. Hence there are only IC_max + * entries in this table, rather than 2^(ATTR_max). + */ +struct ContextDecision { + struct OpcodeDecision opcodeDecisions[IC_max]; +}; + +/* + * Physical encodings of instruction operands. + */ + +#define ENCODINGS \ + ENUM_ENTRY(ENCODING_NONE, "") \ + ENUM_ENTRY(ENCODING_REG, "Register operand in ModR/M byte.") \ + ENUM_ENTRY(ENCODING_RM, "R/M operand in ModR/M byte.") \ + ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \ + ENUM_ENTRY(ENCODING_CW, "2-byte") \ + ENUM_ENTRY(ENCODING_CD, "4-byte") \ + ENUM_ENTRY(ENCODING_CP, "6-byte") \ + ENUM_ENTRY(ENCODING_CO, "8-byte") \ + ENUM_ENTRY(ENCODING_CT, "10-byte") \ + ENUM_ENTRY(ENCODING_IB, "1-byte immediate") \ + ENUM_ENTRY(ENCODING_IW, "2-byte") \ + ENUM_ENTRY(ENCODING_ID, "4-byte") \ + ENUM_ENTRY(ENCODING_IO, "8-byte") \ + ENUM_ENTRY(ENCODING_RB, "(AL..DIL, R8L..R15L) Register code added to " \ + "the opcode byte") \ + ENUM_ENTRY(ENCODING_RW, "(AX..DI, R8W..R15W)") \ + ENUM_ENTRY(ENCODING_RD, "(EAX..EDI, R8D..R15D)") \ + ENUM_ENTRY(ENCODING_RO, "(RAX..RDI, R8..R15)") \ + ENUM_ENTRY(ENCODING_I, "Position on floating-point stack added to the " \ + "opcode byte") \ + \ + ENUM_ENTRY(ENCODING_Iv, "Immediate of operand size") \ + ENUM_ENTRY(ENCODING_Ia, "Immediate of address size") \ + ENUM_ENTRY(ENCODING_Rv, "Register code of operand size added to the " \ + "opcode byte") \ + ENUM_ENTRY(ENCODING_DUP, "Duplicate of another operand; ID is encoded " \ + "in type") + +#define ENUM_ENTRY(n, d) n, + typedef enum { + ENCODINGS + ENCODING_max + } OperandEncoding; +#undef ENUM_ENTRY + +/* + * Semantic interpretations of instruction operands. 
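Putting the three levels together, the whole lookup described above is two array indexes plus the ModR/M decision. A sketch, assuming one of the generated ContextDecision tables (ONEBYTE_SYM and friends) is in scope, and reusing the uidForModRM() sketch shown earlier:

    static InstrUID lookupUID(const struct ContextDecision *ctx,
                              InstructionContext ic,
                              uint8_t opcode,
                              uint8_t modRM) {
      const struct ModRMDecision *dec =
        &ctx->opcodeDecisions[ic].modRMDecisions[opcode];
      return uidForModRM(dec, modRM);
    }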
+ */ + +#define TYPES \ + ENUM_ENTRY(TYPE_NONE, "") \ + ENUM_ENTRY(TYPE_REL8, "1-byte immediate address") \ + ENUM_ENTRY(TYPE_REL16, "2-byte") \ + ENUM_ENTRY(TYPE_REL32, "4-byte") \ + ENUM_ENTRY(TYPE_REL64, "8-byte") \ + ENUM_ENTRY(TYPE_PTR1616, "2+2-byte segment+offset address") \ + ENUM_ENTRY(TYPE_PTR1632, "2+4-byte") \ + ENUM_ENTRY(TYPE_PTR1664, "2+8-byte") \ + ENUM_ENTRY(TYPE_R8, "1-byte register operand") \ + ENUM_ENTRY(TYPE_R16, "2-byte") \ + ENUM_ENTRY(TYPE_R32, "4-byte") \ + ENUM_ENTRY(TYPE_R64, "8-byte") \ + ENUM_ENTRY(TYPE_IMM8, "1-byte immediate operand") \ + ENUM_ENTRY(TYPE_IMM16, "2-byte") \ + ENUM_ENTRY(TYPE_IMM32, "4-byte") \ + ENUM_ENTRY(TYPE_IMM64, "8-byte") \ + ENUM_ENTRY(TYPE_RM8, "1-byte register or memory operand") \ + ENUM_ENTRY(TYPE_RM16, "2-byte") \ + ENUM_ENTRY(TYPE_RM32, "4-byte") \ + ENUM_ENTRY(TYPE_RM64, "8-byte") \ + ENUM_ENTRY(TYPE_M, "Memory operand") \ + ENUM_ENTRY(TYPE_M8, "1-byte") \ + ENUM_ENTRY(TYPE_M16, "2-byte") \ + ENUM_ENTRY(TYPE_M32, "4-byte") \ + ENUM_ENTRY(TYPE_M64, "8-byte") \ + ENUM_ENTRY(TYPE_LEA, "Effective address") \ + ENUM_ENTRY(TYPE_M128, "16-byte (SSE/SSE2)") \ + ENUM_ENTRY(TYPE_M1616, "2+2-byte segment+offset address") \ + ENUM_ENTRY(TYPE_M1632, "2+4-byte") \ + ENUM_ENTRY(TYPE_M1664, "2+8-byte") \ + ENUM_ENTRY(TYPE_M16_32, "2+4-byte two-part memory operand (LIDT, LGDT)") \ + ENUM_ENTRY(TYPE_M16_16, "2+2-byte (BOUND)") \ + ENUM_ENTRY(TYPE_M32_32, "4+4-byte (BOUND)") \ + ENUM_ENTRY(TYPE_M16_64, "2+8-byte (LIDT, LGDT)") \ + ENUM_ENTRY(TYPE_MOFFS8, "1-byte memory offset (relative to segment " \ + "base)") \ + ENUM_ENTRY(TYPE_MOFFS16, "2-byte") \ + ENUM_ENTRY(TYPE_MOFFS32, "4-byte") \ + ENUM_ENTRY(TYPE_MOFFS64, "8-byte") \ + ENUM_ENTRY(TYPE_SREG, "Byte with single bit set: 0 = ES, 1 = CS, " \ + "2 = SS, 3 = DS, 4 = FS, 5 = GS") \ + ENUM_ENTRY(TYPE_M32FP, "32-bit IEE754 memory floating-point operand") \ + ENUM_ENTRY(TYPE_M64FP, "64-bit") \ + ENUM_ENTRY(TYPE_M80FP, "80-bit extended") \ + ENUM_ENTRY(TYPE_M16INT, "2-byte memory integer operand for use in " \ + "floating-point instructions") \ + ENUM_ENTRY(TYPE_M32INT, "4-byte") \ + ENUM_ENTRY(TYPE_M64INT, "8-byte") \ + ENUM_ENTRY(TYPE_ST, "Position on the floating-point stack") \ + ENUM_ENTRY(TYPE_MM, "MMX register operand") \ + ENUM_ENTRY(TYPE_MM32, "4-byte MMX register or memory operand") \ + ENUM_ENTRY(TYPE_MM64, "8-byte") \ + ENUM_ENTRY(TYPE_XMM, "XMM register operand") \ + ENUM_ENTRY(TYPE_XMM32, "4-byte XMM register or memory operand") \ + ENUM_ENTRY(TYPE_XMM64, "8-byte") \ + ENUM_ENTRY(TYPE_XMM128, "16-byte") \ + ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \ + ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \ + ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \ + ENUM_ENTRY(TYPE_CR32, "4-byte control register operand") \ + ENUM_ENTRY(TYPE_CR64, "8-byte") \ + \ + ENUM_ENTRY(TYPE_Mv, "Memory operand of operand size") \ + ENUM_ENTRY(TYPE_Rv, "Register operand of operand size") \ + ENUM_ENTRY(TYPE_IMMv, "Immediate operand of operand size") \ + ENUM_ENTRY(TYPE_RELv, "Immediate address of operand size") \ + ENUM_ENTRY(TYPE_DUP0, "Duplicate of operand 0") \ + ENUM_ENTRY(TYPE_DUP1, "operand 1") \ + ENUM_ENTRY(TYPE_DUP2, "operand 2") \ + ENUM_ENTRY(TYPE_DUP3, "operand 3") \ + ENUM_ENTRY(TYPE_DUP4, "operand 4") \ + ENUM_ENTRY(TYPE_M512, "512-bit FPU/MMX/XMM/MXCSR state") + +#define ENUM_ENTRY(n, d) n, +typedef enum { + TYPES + TYPE_max +} OperandType; +#undef ENUM_ENTRY + +/* + * OperandSpecifier - The specification for how to extract and interpret one + * operand. 
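Each instruction then carries a fixed-size array of these specifiers; unused slots are marked ENCODING_NONE, which is how readOperands() knows to skip them. A sketch against the definitions just below:

    /* Count the operands actually present in one instruction specifier. */
    static int countOperands(const struct InstructionSpecifier *spec) {
      int count = 0;
      int index;
      for (index = 0; index < X86_MAX_OPERANDS; ++index)
        if (spec->operands[index].encoding != ENCODING_NONE)
          ++count;
      return count;
    }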
+ */ +struct OperandSpecifier { + OperandEncoding encoding; + OperandType type; +}; + +/* + * Indicates where the opcode modifier (if any) is to be found. Extended + * opcodes with AddRegFrm have the opcode modifier in the ModR/M byte. + */ + +#define MODIFIER_TYPES \ + ENUM_ENTRY(MODIFIER_NONE) \ + ENUM_ENTRY(MODIFIER_OPCODE) \ + ENUM_ENTRY(MODIFIER_MODRM) + +#define ENUM_ENTRY(n) n, +typedef enum { + MODIFIER_TYPES + MODIFIER_max +} ModifierType; +#undef ENUM_ENTRY + +#define X86_MAX_OPERANDS 5 + +/* + * The specification for how to extract and interpret a full instruction and + * its operands. + */ +struct InstructionSpecifier { + ModifierType modifierType; + uint8_t modifierBase; + struct OperandSpecifier operands[X86_MAX_OPERANDS]; + + /* The macro below must be defined wherever this file is included. */ + INSTRUCTION_SPECIFIER_FIELDS +}; + +/* + * Decoding mode for the Intel disassembler. 16-bit, 32-bit, and 64-bit mode + * are supported, and represent real mode, IA-32e, and IA-32e in 64-bit mode, + * respectively. + */ +typedef enum { + MODE_16BIT, + MODE_32BIT, + MODE_64BIT +} DisassemblerMode; + +#endif diff --git a/lib/Target/X86/Makefile b/lib/Target/X86/Makefile index b311a6e..6098dbf 100644 --- a/lib/Target/X86/Makefile +++ b/lib/Target/X86/Makefile @@ -15,8 +15,8 @@ BUILT_SOURCES = X86GenRegisterInfo.h.inc X86GenRegisterNames.inc \ X86GenRegisterInfo.inc X86GenInstrNames.inc \ X86GenInstrInfo.inc X86GenAsmWriter.inc X86GenAsmMatcher.inc \ X86GenAsmWriter1.inc X86GenDAGISel.inc \ - X86GenFastISel.inc \ - X86GenCallingConv.inc X86GenSubtarget.inc + X86GenDisassemblerTables.inc X86GenFastISel.inc \ + X86GenCallingConv.inc X86GenSubtarget.inc \ DIRS = AsmPrinter AsmParser Disassembler TargetInfo diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt index 9b7aab8..afd9f53 100644 --- a/lib/Target/X86/README.txt +++ b/lib/Target/X86/README.txt @@ -123,20 +123,6 @@ when it can invert the result of the compare for free. //===---------------------------------------------------------------------===// -How about intrinsics? An example is: - *res = _mm_mulhi_epu16(*A, _mm_mul_epu32(*B, *C)); - -compiles to - pmuludq (%eax), %xmm0 - movl 8(%esp), %eax - movdqa (%eax), %xmm1 - pmulhuw %xmm0, %xmm1 - -The transformation probably requires a X86 specific pass or a DAG combiner -target specific hook. - -//===---------------------------------------------------------------------===// - In many cases, LLVM generates code like this: _test: @@ -1762,6 +1748,11 @@ LBB1_1: ## bb1 cmpl $150, %edi jne LBB1_1 ## bb1 +The issue is that we hoist the cast of "scaler" to long long outside of the +loop, the value comes into the loop as two values, and +RegsForValue::getCopyFromRegs doesn't know how to put an AssertSext on the +constructed BUILD_PAIR which represents the cast value. 
+ //===---------------------------------------------------------------------===// Test instructions can be eliminated by using EFLAGS values from arithmetic diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index da467fe..a6e1ca3 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -63,7 +63,7 @@ def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true", def FeatureAVX : SubtargetFeature<"avx", "HasAVX", "true", "Enable AVX instructions">; def FeatureFMA3 : SubtargetFeature<"fma3", "HasFMA3", "true", - "Enable three-operand fused multiple-add">; + "Enable three-operand fused multiple-add">; def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true", "Enable four-operand fused multiple-add">; diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index a9a78be..cb82383 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -50,9 +50,6 @@ #include "llvm/ADT/Statistic.h" using namespace llvm; -#include "llvm/Support/CommandLine.h" -static cl::opt<bool> AvoidDupAddrCompute("x86-avoid-dup-address", cl::Hidden); - STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor"); //===----------------------------------------------------------------------===// @@ -1275,28 +1272,7 @@ bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base, SDValue &Scale, SDValue &Index, SDValue &Disp, SDValue &Segment) { X86ISelAddressMode AM; - bool Done = false; - if (AvoidDupAddrCompute && !N.hasOneUse()) { - unsigned Opcode = N.getOpcode(); - if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex && - Opcode != X86ISD::Wrapper && Opcode != X86ISD::WrapperRIP) { - // If we are able to fold N into addressing mode, then we'll allow it even - // if N has multiple uses. In general, addressing computation is used as - // addresses by all of its uses. But watch out for CopyToReg uses, that - // means the address computation is liveout. It will be computed by a LEA - // so we want to avoid computing the address twice. - for (SDNode::use_iterator UI = N.getNode()->use_begin(), - UE = N.getNode()->use_end(); UI != UE; ++UI) { - if (UI->getOpcode() == ISD::CopyToReg) { - MatchAddressBase(N, AM); - Done = true; - break; - } - } - } - } - - if (!Done && MatchAddress(N, AM)) + if (MatchAddress(N, AM)) return false; EVT VT = N.getValueType(); @@ -1891,27 +1867,28 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) { } } - unsigned LoReg, HiReg; + unsigned LoReg, HiReg, ClrReg; unsigned ClrOpcode, SExtOpcode; + EVT ClrVT = NVT; switch (NVT.getSimpleVT().SimpleTy) { default: llvm_unreachable("Unsupported VT!"); case MVT::i8: - LoReg = X86::AL; HiReg = X86::AH; + LoReg = X86::AL; ClrReg = HiReg = X86::AH; ClrOpcode = 0; SExtOpcode = X86::CBW; break; case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; - ClrOpcode = X86::MOV16r0; + ClrOpcode = X86::MOV32r0; ClrReg = X86::EDX; ClrVT = MVT::i32; SExtOpcode = X86::CWD; break; case MVT::i32: - LoReg = X86::EAX; HiReg = X86::EDX; + LoReg = X86::EAX; ClrReg = HiReg = X86::EDX; ClrOpcode = X86::MOV32r0; SExtOpcode = X86::CDQ; break; case MVT::i64: - LoReg = X86::RAX; HiReg = X86::RDX; + LoReg = X86::RAX; ClrReg = HiReg = X86::RDX; ClrOpcode = ~0U; // NOT USED. 
SExtOpcode = X86::CQO; break; @@ -1966,10 +1943,10 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) { MVT::i64, Zero, ClrNode, SubRegNo), 0); } else { - ClrNode = SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0); + ClrNode = SDValue(CurDAG->getMachineNode(ClrOpcode, dl, ClrVT), 0); } - InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, HiReg, + InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg, ClrNode, InFlag).getValue(1); } } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 0517b56..c722fbf 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -980,6 +980,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setTargetDAGCombine(ISD::SRL); setTargetDAGCombine(ISD::STORE); setTargetDAGCombine(ISD::MEMBARRIER); + setTargetDAGCombine(ISD::ZERO_EXTEND); if (Subtarget->is64Bit()) setTargetDAGCombine(ISD::MUL); @@ -4583,7 +4584,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { MVT::v4i32, Vec), Op.getOperand(1))); // Transform it so it match pextrw which produces a 32-bit result. - EVT EltVT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy+1); + EVT EltVT = MVT::i32; SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, Op.getOperand(0), Op.getOperand(1)); SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, @@ -5127,12 +5128,9 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); else Tys = DAG.getVTList(Op.getValueType(), MVT::Other); - SmallVector<SDValue, 8> Ops; - Ops.push_back(Chain); - Ops.push_back(StackSlot); - Ops.push_back(DAG.getValueType(SrcVT)); + SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; SDValue Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, dl, - Tys, &Ops[0], Ops.size()); + Tys, Ops, array_lengthof(Ops)); if (useSSE) { Chain = Result.getValue(1); @@ -5145,13 +5143,10 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); Tys = DAG.getVTList(MVT::Other); - SmallVector<SDValue, 8> Ops; - Ops.push_back(Chain); - Ops.push_back(Result); - Ops.push_back(StackSlot); - Ops.push_back(DAG.getValueType(Op.getValueType())); - Ops.push_back(InFlag); - Chain = DAG.getNode(X86ISD::FST, dl, Tys, &Ops[0], Ops.size()); + SDValue Ops[] = { + Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag + }; + Chain = DAG.getNode(X86ISD::FST, dl, Tys, Ops, array_lengthof(Ops)); Result = DAG.getLoad(Op.getValueType(), dl, Chain, StackSlot, PseudoSourceValue::getFixedStack(SSFI), 0); } @@ -5752,14 +5747,11 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) { SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG); // Use sbb x, x to materialize carry bit into a GPR. - // FIXME: Temporarily disabled since it breaks self-hosting. It's apparently - // miscompiling ARMISelDAGToDAG.cpp. 
- if (0 && !isFP && X86CC == X86::COND_B) { + if (X86CC == X86::COND_B) return DAG.getNode(ISD::AND, dl, MVT::i8, DAG.getNode(X86ISD::SETCC_CARRY, dl, MVT::i8, DAG.getConstant(X86CC, MVT::i8), Cond), DAG.getConstant(1, MVT::i8)); - } return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, DAG.getConstant(X86CC, MVT::i8), Cond); @@ -5949,14 +5941,10 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) { } SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag); - SmallVector<SDValue, 4> Ops; // X86ISD::CMOV means set the result (which is operand 1) to the RHS if // condition is true. - Ops.push_back(Op.getOperand(2)); - Ops.push_back(Op.getOperand(1)); - Ops.push_back(CC); - Ops.push_back(Cond); - return DAG.getNode(X86ISD::CMOV, dl, VTs, &Ops[0], Ops.size()); + SDValue Ops[] = { Op.getOperand(2), Op.getOperand(1), CC, Cond }; + return DAG.getNode(X86ISD::CMOV, dl, VTs, Ops, array_lengthof(Ops)); } // isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or @@ -6196,7 +6184,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false, false, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/false, - DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl); + DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl, + DAG.GetOrdering(Chain.getNode())); return CallResult.second; } @@ -6266,11 +6255,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, InFlag = Chain.getValue(1); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); - SmallVector<SDValue, 8> Ops; - Ops.push_back(Chain); - Ops.push_back(DAG.getValueType(AVT)); - Ops.push_back(InFlag); - Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, &Ops[0], Ops.size()); + SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag }; + Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops)); if (TwoRepStos) { InFlag = Chain.getValue(1); @@ -6283,11 +6269,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, Left, InFlag); InFlag = Chain.getValue(1); Tys = DAG.getVTList(MVT::Other, MVT::Flag); - Ops.clear(); - Ops.push_back(Chain); - Ops.push_back(DAG.getValueType(MVT::i8)); - Ops.push_back(InFlag); - Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, &Ops[0], Ops.size()); + SDValue Ops[] = { Chain, DAG.getValueType(MVT::i8), InFlag }; + Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops)); } else if (BytesLeft) { // Handle the last 1 - 7 bytes. unsigned Offset = SizeVal - BytesLeft; @@ -6351,11 +6334,9 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, InFlag = Chain.getValue(1); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); - SmallVector<SDValue, 8> Ops; - Ops.push_back(Chain); - Ops.push_back(DAG.getValueType(AVT)); - Ops.push_back(InFlag); - SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, &Ops[0], Ops.size()); + SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag }; + SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops, + array_lengthof(Ops)); SmallVector<SDValue, 4> Results; Results.push_back(RepMovs); @@ -6979,12 +6960,13 @@ SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) { Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); // If src is zero (i.e. bsr sets ZF), returns NumBits. 
- SmallVector<SDValue, 4> Ops; - Ops.push_back(Op); - Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); - Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); - Ops.push_back(Op.getValue(1)); - Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, &Ops[0], 4); + SDValue Ops[] = { + Op, + DAG.getConstant(NumBits+NumBits-1, OpVT), + DAG.getConstant(X86::COND_E, MVT::i8), + Op.getValue(1) + }; + Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); // Finally xor with NumBits-1. Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); @@ -7011,12 +6993,13 @@ SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) { Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); // If src is zero (i.e. bsf sets ZF), returns NumBits. - SmallVector<SDValue, 4> Ops; - Ops.push_back(Op); - Ops.push_back(DAG.getConstant(NumBits, OpVT)); - Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); - Ops.push_back(Op.getValue(1)); - Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, &Ops[0], 4); + SDValue Ops[] = { + Op, + DAG.getConstant(NumBits, OpVT), + DAG.getConstant(X86::COND_E, MVT::i8), + Op.getValue(1) + }; + Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); if (VT == MVT::i8) Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); @@ -9349,6 +9332,32 @@ static SDValue PerformMEMBARRIERCombine(SDNode* N, SelectionDAG &DAG) { } } +static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) { + // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> + // (and (i32 x86isd::setcc_carry), 1) + // This eliminates the zext. This transformation is necessary because + // ISD::SETCC is always legalized to i8. + DebugLoc dl = N->getDebugLoc(); + SDValue N0 = N->getOperand(0); + EVT VT = N->getValueType(0); + if (N0.getOpcode() == ISD::AND && + N0.hasOneUse() && + N0.getOperand(0).hasOneUse()) { + SDValue N00 = N0.getOperand(0); + if (N00.getOpcode() != X86ISD::SETCC_CARRY) + return SDValue(); + ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); + if (!C || C->getZExtValue() != 1) + return SDValue(); + return DAG.getNode(ISD::AND, dl, VT, + DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, + N00.getOperand(0), N00.getOperand(1)), + DAG.getConstant(1, VT)); + } + + return SDValue(); +} + SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -9368,6 +9377,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); case ISD::MEMBARRIER: return PerformMEMBARRIERCombine(N, DAG); + case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG); } return SDValue(); diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td index b6a2c05..65fbbda 100644 --- a/lib/Target/X86/X86Instr64bit.td +++ b/lib/Target/X86/X86Instr64bit.td @@ -111,6 +111,9 @@ def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2), Requires<[In64BitMode]>; } +// Interrupt Instructions +def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iret{q}", []>; + //===----------------------------------------------------------------------===// // Call Instructions... // @@ -131,20 +134,21 @@ let isCall = 1 in // the 32-bit pcrel field that we have. 
def CALL64pcrel32 : Ii32<0xE8, RawFrm, (outs), (ins i64i32imm_pcrel:$dst, variable_ops), - "call\t$dst", []>, + "call{q}\t$dst", []>, Requires<[In64BitMode, NotWin64]>; def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops), - "call\t{*}$dst", [(X86call GR64:$dst)]>, + "call{q}\t{*}$dst", [(X86call GR64:$dst)]>, Requires<[NotWin64]>; def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops), - "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>, + "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>, Requires<[NotWin64]>; def FARCALL64 : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst), "lcall{q}\t{*}$dst", []>; } - // FIXME: We need to teach codegen about single list of call-clobbered registers. + // FIXME: We need to teach codegen about single list of call-clobbered + // registers. let isCall = 1 in // All calls clobber the non-callee saved registers. RSP is marked as // a use to prevent stack-pointer assignments that appear immediately @@ -162,9 +166,10 @@ let isCall = 1 in def WINCALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops), "call\t{*}$dst", [(X86call GR64:$dst)]>, Requires<[IsWin64]>; - def WINCALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops), - "call\t{*}$dst", - [(X86call (loadi64 addr:$dst))]>, Requires<[IsWin64]>; + def WINCALL64m : I<0xFF, MRM2m, (outs), + (ins i64mem:$dst, variable_ops), "call\t{*}$dst", + [(X86call (loadi64 addr:$dst))]>, + Requires<[IsWin64]>; } @@ -188,6 +193,8 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in // Branches let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { + def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst), + "jmp{q}\t$dst", []>; def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst", [(brind GR64:$dst)]>; def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst", @@ -210,6 +217,12 @@ def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr), //===----------------------------------------------------------------------===// // Miscellaneous Instructions... 
// + +def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), + "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS; +def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), + "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS; + let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>; @@ -238,9 +251,9 @@ def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm), } let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in -def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W; +def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf{q}", []>, REX_W; let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in -def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>; +def PUSHFQ64 : I<0x9C, RawFrm, (outs), (ins), "pushf{q}", []>; def LEA64_32r : I<0x8D, MRMSrcMem, (outs GR32:$dst), (ins lea64_32mem:$src), @@ -309,6 +322,9 @@ def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src), [(set GR64:$dst, i64immSExt32:$src)]>; } +def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>; + let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "mov{q}\t{$src, $dst|$dst, $src}", @@ -321,24 +337,36 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(store i64immSExt32:$src, addr:$dst)]>; -def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins i8imm:$src), +def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src), "mov{q}\t{$src, %rax|%rax, $src}", []>; -def MOV64o32a : RIi32<0xA1, RawFrm, (outs), (ins i32imm:$src), +def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src), "mov{q}\t{$src, %rax|%rax, $src}", []>; -def MOV64ao8 : RIi8<0xA2, RawFrm, (outs i8imm:$dst), (ins), +def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins), "mov{q}\t{%rax, $dst|$dst, %rax}", []>; -def MOV64ao32 : RIi32<0xA3, RawFrm, (outs i32imm:$dst), (ins), +def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins), "mov{q}\t{%rax, $dst|$dst, %rax}", []>; // Moves to and from segment registers def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src), - "mov{w}\t{$src, $dst|$dst, $src}", []>; + "mov{q}\t{$src, $dst|$dst, $src}", []>; def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src), - "mov{w}\t{$src, $dst|$dst, $src}", []>; + "mov{q}\t{$src, $dst|$dst, $src}", []>; def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src), - "mov{w}\t{$src, $dst|$dst, $src}", []>; + "mov{q}\t{$src, $dst|$dst, $src}", []>; def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src), - "mov{w}\t{$src, $dst|$dst, $src}", []>; + "mov{q}\t{$src, $dst|$dst, $src}", []>; + +// Moves to and from debug registers +def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>, TB; +def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>, TB; + +// Moves to and from control registers +def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG_64:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>, TB; +def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_64:$dst), (ins GR64:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>, TB; // Sign/Zero extenders @@ -365,6 +393,16 @@ def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), 
(ins i32mem:$src), "movs{lq|xd}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (sextloadi64i32 addr:$src))]>; +// movzbq and movzwq encodings for the disassembler +def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src), + "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB; +def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src), + "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB; +def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src), + "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB; +def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), + "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB; + // Use movzbl instead of movzbq when the destination is a register; it's // equivalent due to implicit zero-extending, and it has a smaller encoding. def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src), @@ -430,31 +468,36 @@ let isTwoAddress = 1 in { let isConvertibleToThreeAddress = 1 in { let isCommutable = 1 in // Register-Register Addition -def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (add GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; // Register-Integer Addition -def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)), (implicit EFLAGS)]>; -def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)), (implicit EFLAGS)]>; } // isConvertibleToThreeAddress // Register-Memory Addition -def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))), (implicit EFLAGS)]>; // Register-Register Addition - Equivalent to the normal rr form (ADD64rr), but // differently encoded. 
-def ADD64mrmrr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def ADD64mrmrr : RI<0x03, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "add{l}\t{$src2, $dst|$dst, $src2}", []>; } // isTwoAddress @@ -480,18 +523,26 @@ def ADC64i32 : RI<0x15, RawFrm, (outs), (ins i32imm:$src), let isTwoAddress = 1 in { let isCommutable = 1 in -def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>; -def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR32:$dst), + (ins GR64:$src1, GR64:$src2), + "adc{q}\t{$src2, $dst|$dst, $src2}", []>; + +def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>; -def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>; -def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>; } // isTwoAddress @@ -501,21 +552,29 @@ def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>; def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", - [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>; + [(store (adde (load addr:$dst), i64immSExt8:$src2), + addr:$dst)]>; def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", - [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>; + [(store (adde (load addr:$dst), i64immSExt8:$src2), + addr:$dst)]>; } // Uses = [EFLAGS] let isTwoAddress = 1 in { // Register-Register Subtraction -def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; +def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", []>; + // Register-Memory Subtraction -def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))), (implicit EFLAGS)]>; @@ -556,18 +615,26 @@ def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2), let Uses = [EFLAGS] in { let isTwoAddress = 1 in { -def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "sbb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>; -def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), 
+def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "sbb{q}\t{$src2, $dst|$dst, $src2}", []>; + +def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "sbb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>; -def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "sbb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>; -def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "sbb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>; } // isTwoAddress @@ -652,15 +719,19 @@ def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32 // Unsigned division / remainder let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in { -def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX +// RDX:RAX/r64 = RAX,RDX +def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), "div{q}\t$src", []>; // Signed division / remainder -def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX +// RDX:RAX/r64 = RAX,RDX +def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), "idiv{q}\t$src", []>; let mayLoad = 1 in { -def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX +// RDX:RAX/[mem64] = RAX,RDX +def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), "div{q}\t$src", []>; -def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX +// RDX:RAX/[mem64] = RAX,RDX +def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), "idiv{q}\t$src", []>; } } @@ -694,19 +765,23 @@ def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst", // In 64-bit mode, single byte INC and DEC cannot be encoded. let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in { // Can transform into LEA. -def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst", +def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), + "inc{w}\t$dst", [(set GR16:$dst, (add GR16:$src, 1)), (implicit EFLAGS)]>, OpSize, Requires<[In64BitMode]>; -def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst", +def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), + "inc{l}\t$dst", [(set GR32:$dst, (add GR32:$src, 1)), (implicit EFLAGS)]>, Requires<[In64BitMode]>; -def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst", +def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), + "dec{w}\t$dst", [(set GR16:$dst, (add GR16:$src, -1)), (implicit EFLAGS)]>, OpSize, Requires<[In64BitMode]>; -def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst", +def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), + "dec{l}\t$dst", [(set GR32:$dst, (add GR32:$src, -1)), (implicit EFLAGS)]>, Requires<[In64BitMode]>; @@ -743,13 +818,14 @@ def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src), "shl{q}\t{%cl, $dst|$dst, %CL}", [(set GR64:$dst, (shl GR64:$src, CL))]>; let isConvertibleToThreeAddress = 1 in // Can transform into LEA. 
-def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), +def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), + (ins GR64:$src1, i8imm:$src2), "shl{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>; // NOTE: We don't include patterns for shifts of a register by one, because // 'add reg,reg' is cheaper. def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1), - "shr{q}\t$dst", []>; + "shl{q}\t$dst", []>; } // isTwoAddress let Uses = [CL] in @@ -792,9 +868,10 @@ let Uses = [CL] in def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src), "sar{q}\t{%cl, $dst|$dst, %CL}", [(set GR64:$dst, (sra GR64:$src, CL))]>; -def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), - "sar{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>; +def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), + (ins GR64:$src1, i8imm:$src2), + "sar{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>; def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1), "sar{q}\t$dst", [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>; @@ -826,7 +903,8 @@ def RCL64mCL : RI<0xD3, MRM2m, (outs i64mem:$dst), (ins i64mem:$src), } def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt), "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>; -def RCL64mi : RIi8<0xC1, MRM2m, (outs i64mem:$dst), (ins i64mem:$src, i8imm:$cnt), +def RCL64mi : RIi8<0xC1, MRM2m, (outs i64mem:$dst), + (ins i64mem:$src, i8imm:$cnt), "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>; def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src), @@ -841,7 +919,8 @@ def RCR64mCL : RI<0xD3, MRM3m, (outs i64mem:$dst), (ins i64mem:$src), } def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt), "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>; -def RCR64mi : RIi8<0xC1, MRM3m, (outs i64mem:$dst), (ins i64mem:$src, i8imm:$cnt), +def RCR64mi : RIi8<0xC1, MRM3m, (outs i64mem:$dst), + (ins i64mem:$src, i8imm:$cnt), "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>; } @@ -850,7 +929,8 @@ let Uses = [CL] in def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src), "rol{q}\t{%cl, $dst|$dst, %CL}", [(set GR64:$dst, (rotl GR64:$src, CL))]>; -def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), +def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), + (ins GR64:$src1, i8imm:$src2), "rol{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>; def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1), @@ -859,9 +939,9 @@ def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1), } // isTwoAddress let Uses = [CL] in -def ROL64mCL : I<0xD3, MRM0m, (outs), (ins i64mem:$dst), - "rol{q}\t{%cl, $dst|$dst, %CL}", - [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>; +def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst), + "rol{q}\t{%cl, $dst|$dst, %CL}", + [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>; def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src), "rol{q}\t{$src, $dst|$dst, $src}", [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -874,7 +954,8 @@ let Uses = [CL] in def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src), "ror{q}\t{%cl, $dst|$dst, %CL}", [(set GR64:$dst, (rotr GR64:$src, CL))]>; -def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), +def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), + (ins GR64:$src1, i8imm:$src2), "ror{q}\t{$src2, $dst|$dst, 
$src2}", [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>; def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1), @@ -896,23 +977,29 @@ def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst), // Double shift instructions (generalizations of rotate) let isTwoAddress = 1 in { let Uses = [CL] in { -def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", - [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB; -def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), + [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, + TB; +def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", - [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB; + [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, + TB; } let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction def SHLD64rri8 : RIi8<0xA4, MRMDestReg, - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3), + (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2, i8imm:$src3), "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, (i8 imm:$src3)))]>, TB; def SHRD64rri8 : RIi8<0xAC, MRMDestReg, - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3), + (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2, i8imm:$src3), "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, (i8 imm:$src3)))]>, @@ -965,6 +1052,9 @@ def AND64rr : RI<0x21, MRMDestReg, "and{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (and GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; +def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "and{q}\t{$src2, $dst|$dst, $src2}", []>; def AND64rm : RI<0x23, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "and{q}\t{$src2, $dst|$dst, $src2}", @@ -1000,19 +1090,26 @@ def AND64mi32 : RIi32<0x81, MRM4m, let isTwoAddress = 1 in { let isCommutable = 1 in -def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (or GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; -def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "or{q}\t{$src2, $dst|$dst, $src2}", []>; +def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))), (implicit EFLAGS)]>; -def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2)), (implicit EFLAGS)]>; -def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2)), (implicit EFLAGS)]>; @@ -1036,15 +1133,21 @@ def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i32imm:$src), 
let isTwoAddress = 1 in { let isCommutable = 1 in -def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; -def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "xor{q}\t{$src2, $dst|$dst, $src2}", []>; +def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))), (implicit EFLAGS)]>; -def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)), (implicit EFLAGS)]>; @@ -1148,10 +1251,12 @@ def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), // Unlike with the register+register form, the memory+register form of the // bt instruction does not ignore the high bits of the index. From ISel's // perspective, this is pretty bizarre. Disable these instructions for now. -//def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), -// "bt{q}\t{$src2, $src1|$src1, $src2}", +def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), + "bt{q}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi64 addr:$src1), GR64:$src2), -// (implicit EFLAGS)]>, TB; +// (implicit EFLAGS)] + [] + >, TB; def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", @@ -1164,6 +1269,33 @@ def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(X86bt (loadi64 addr:$src1), i64immSExt8:$src2), (implicit EFLAGS)]>, TB; + +def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), + "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), + "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), + "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), + "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB; + +def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), + "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), + "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2), + "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2), + "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; + +def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), + "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), + "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2), + "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2), + "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB; } // Defs = 
[EFLAGS] // Conditional moves @@ -1171,164 +1303,164 @@ let Uses = [EFLAGS], isTwoAddress = 1 in { let isCommutable = 1 in { def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_B, EFLAGS))]>, TB; def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_AE, EFLAGS))]>, TB; def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_E, EFLAGS))]>, TB; def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_NE, EFLAGS))]>, TB; def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_BE, EFLAGS))]>, TB; def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_A, EFLAGS))]>, TB; def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_L, EFLAGS))]>, TB; def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_GE, EFLAGS))]>, TB; def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_LE, EFLAGS))]>, TB; def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_G, EFLAGS))]>, TB; def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_S, EFLAGS))]>, TB; def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_NS, EFLAGS))]>, TB; def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, 
X86_COND_P, EFLAGS))]>, TB; def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_NP, EFLAGS))]>, TB; def CMOVO64rr : RI<0x40, MRMSrcReg, // if overflow, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_O, EFLAGS))]>, TB; def CMOVNO64rr : RI<0x41, MRMSrcReg, // if !overflow, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_NO, EFLAGS))]>, TB; } // isCommutable = 1 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_B, EFLAGS))]>, TB; def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_AE, EFLAGS))]>, TB; def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_E, EFLAGS))]>, TB; def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_NE, EFLAGS))]>, TB; def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_BE, EFLAGS))]>, TB; def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_A, EFLAGS))]>, TB; def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_L, EFLAGS))]>, TB; def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_GE, EFLAGS))]>, TB; def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_LE, EFLAGS))]>, TB; def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + 
"cmovg{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_G, EFLAGS))]>, TB; def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_S, EFLAGS))]>, TB; def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_NS, EFLAGS))]>, TB; def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_P, EFLAGS))]>, TB; def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_NP, EFLAGS))]>, TB; def CMOVO64rm : RI<0x40, MRMSrcMem, // if overflow, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_O, EFLAGS))]>, TB; def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_NO, EFLAGS))]>, TB; } // isTwoAddress @@ -1337,9 +1469,9 @@ def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64] let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "sbb{q}\t$dst, $dst", - [(set GR64:$dst, (zext (X86setcc_c X86_COND_B, EFLAGS)))]>; + [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; -def : Pat<(i64 (anyext (X86setcc_c X86_COND_B, EFLAGS))), +def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), (SETB_C64r)>; //===----------------------------------------------------------------------===// @@ -1347,11 +1479,16 @@ def : Pat<(i64 (anyext (X86setcc_c X86_COND_B, EFLAGS))), // // f64 -> signed i64 +def CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src), + "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>; +def CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src), + "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>; def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "cvtsd2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse2_cvtsd2si64 VR128:$src))]>; -def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src), +def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), + (ins f128mem:$src), "cvtsd2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse2_cvtsd2si64 (load addr:$src)))]>; @@ -1365,7 +1502,8 @@ def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "cvttsd2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse2_cvttsd2si64 VR128:$src))]>; -def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src), +def Int_CVTTSD2SI64rm: RSDI<0x2C, 
MRMSrcMem, (outs GR64:$dst), + (ins f128mem:$src), "cvttsd2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse2_cvttsd2si64 @@ -1410,7 +1548,8 @@ let isTwoAddress = 1 in { (int_x86_sse_cvtsi642ss VR128:$src1, GR64:$src2))]>; def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem, - (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2), + (outs VR128:$dst), + (ins VR128:$src1, i64mem:$src2), "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse_cvtsi642ss VR128:$src1, @@ -1418,6 +1557,10 @@ let isTwoAddress = 1 in { } // f32 -> signed i64 +def CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src), + "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>; +def CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src), + "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>; def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "cvtss2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, @@ -1436,10 +1579,20 @@ def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "cvttss2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse_cvttss2si64 VR128:$src))]>; -def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src), +def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), + (ins f32mem:$src), "cvttss2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse_cvttss2si64 (load addr:$src)))]>; + +// Descriptor-table support instructions + +// LLDT is not interpreted specially in 64-bit mode because there is no sign +// extension. +def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins), + "sldt{q}\t$dst", []>, TB; +def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins), + "sldt{q}\t$dst", []>, TB; //===----------------------------------------------------------------------===// // Alias Instructions @@ -1505,17 +1658,37 @@ def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap), let Constraints = "$val = $dst" in { let Defs = [EFLAGS] in -def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val), +def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr), "lock\n\t" "xadd\t$val, $ptr", [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>, TB, LOCK; -def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val), - "xchg\t$val, $ptr", +def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$val,i64mem:$ptr), + "xchg{q}\t{$val, $ptr|$ptr, $val}", [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>; + +def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src), + "xchg{q}\t{$val, $src|$src, $val}", []>; } +def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), + "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB; +def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), + "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), + "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB; +def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), + "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst), + "cmpxchg16b\t$dst", []>, TB; + +def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src), + "xchg{q}\t{$src, %rax|%rax, $src}", []>; + // Optimized codegen when the non-memory output is not used. let Defs = [EFLAGS] in { // FIXME: Use normal add / sub instructions and add lock prefix dynamically. 
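As an aside on the atomic forms above: a minimal sketch (assuming GCC/Clang's __sync builtins on x86-64) of source that the atomic_load_add_64 pattern is meant to catch; the expected lowering is a single lock xadd rather than a compare-exchange loop.

    #include <cstdint>
    #include <cstdio>

    int64_t counter = 0;

    int64_t fetch_add(int64_t delta) {
      // Expected lowering: lock xaddq %rdi, counter(%rip)
      return __sync_fetch_and_add(&counter, delta);
    }

    int main() {
      fetch_add(5);
      std::printf("%lld\n", (long long)counter); // prints 5
      return 0;
    }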
@@ -1585,6 +1758,36 @@ def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src), "lar{q}\t{$src, $dst|$dst, $src}", []>, TB; +def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), + "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB; +def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), + "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB; + +def SWAPGS : I<0x01, RawFrm, (outs), (ins), "swapgs", []>, TB; + +def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins), + "push{q}\t%fs", []>, TB; +def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins), + "push{q}\t%gs", []>, TB; + +def POPFS64 : I<0xa1, RawFrm, (outs), (ins), + "pop{q}\t%fs", []>, TB; +def POPGS64 : I<0xa9, RawFrm, (outs), (ins), + "pop{q}\t%gs", []>, TB; + +def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src), + "lss{q}\t{$src, $dst|$dst, $src}", []>, TB; +def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src), + "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB; +def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src), + "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB; + +// Specialized register support + +// no m form encodable; use SMSW16m +def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins), + "smsw{q}\t$dst", []>, TB; + // String manipulation instructions def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>; @@ -1722,9 +1925,9 @@ def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS), def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>; // extload -// When extloading from 16-bit and smaller memory locations into 64-bit registers, -// use zero-extending loads so that the entire 64-bit register is defined, avoiding -// partial-register updates. +// When extloading from 16-bit and smaller memory locations into 64-bit +// registers, use zero-extending loads so that the entire 64-bit register is +// defined, avoiding partial-register updates. 
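To illustrate the comment above, a minimal sketch (assuming GCC/Clang on x86-64) of why the zero-extending form is preferable:

    #include <cstdint>

    // Expected lowering: movzbl (%rdi), %eax. Writing EAX implicitly zeroes
    // bits 63:32, and MOVZX defines bits 31:8, so no later use of the full
    // 64-bit register depends on stale upper bits.
    uint64_t widen(const uint8_t *p) {
      return *p;
    }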
def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>; def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>; def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>; @@ -1995,7 +2198,8 @@ def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt8:$src2), addr:$dst), (implicit EFLAGS)), (ADD64mi8 addr:$dst, i64immSExt8:$src2)>; -def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt32:$src2), +def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), + i64immSExt32:$src2), addr:$dst), (implicit EFLAGS)), (ADD64mi32 addr:$dst, i64immSExt32:$src2)>; @@ -2025,11 +2229,13 @@ def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), GR64:$src2), (SUB64mr addr:$dst, GR64:$src2)>; // Memory-Integer Subtraction with EFLAGS result -def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt8:$src2), +def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), + i64immSExt8:$src2), addr:$dst), (implicit EFLAGS)), (SUB64mi8 addr:$dst, i64immSExt8:$src2)>; -def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt32:$src2), +def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), + i64immSExt32:$src2), addr:$dst), (implicit EFLAGS)), (SUB64mi32 addr:$dst, i64immSExt32:$src2)>; @@ -2153,7 +2359,8 @@ def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), i64immSExt8:$src2), addr:$dst), (implicit EFLAGS)), (XOR64mi8 addr:$dst, i64immSExt8:$src2)>; -def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), i64immSExt32:$src2), +def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), + i64immSExt32:$src2), addr:$dst), (implicit EFLAGS)), (XOR64mi32 addr:$dst, i64immSExt32:$src2)>; @@ -2185,7 +2392,8 @@ def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), i64immSExt8:$src2), addr:$dst), (implicit EFLAGS)), (AND64mi8 addr:$dst, i64immSExt8:$src2)>; -def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), i64immSExt32:$src2), +def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), + i64immSExt32:$src2), addr:$dst), (implicit EFLAGS)), (AND64mi32 addr:$dst, i64immSExt32:$src2)>; diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td index b0b0409..71ec178 100644 --- a/lib/Target/X86/X86InstrFPStack.td +++ b/lib/Target/X86/X86InstrFPStack.td @@ -195,48 +195,67 @@ def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2), TwoArgFP, // These instructions cannot address 80-bit memory. 
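A sketch of what that restriction means in practice, assuming GCC/Clang x87 codegen: f32 and f64 operands can feed the arithmetic directly from memory, but an 80-bit operand must first be pushed onto the FP stack.

    // Expected: the 32-bit memory form is used directly, e.g. fadds (%rdi).
    long double add_f32(long double acc, const float *p) {
      return acc + *p;
    }

    // Expected: fldt followed by faddp -- there is no fadd with an 80-bit
    // memory operand, so the value is loaded onto the stack first.
    long double add_f80(long double acc, const long double *p) {
      return acc + *p;
    }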
multiclass FPBinary<SDNode OpNode, Format fp, string asmstring> { // ST(0) = ST(0) + [mem] -def _Fp32m : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, f32mem:$src2), OneArgFPRW, +def _Fp32m : FpIf32<(outs RFP32:$dst), + (ins RFP32:$src1, f32mem:$src2), OneArgFPRW, [(set RFP32:$dst, (OpNode RFP32:$src1, (loadf32 addr:$src2)))]>; -def _Fp64m : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, f64mem:$src2), OneArgFPRW, +def _Fp64m : FpIf64<(outs RFP64:$dst), + (ins RFP64:$src1, f64mem:$src2), OneArgFPRW, [(set RFP64:$dst, (OpNode RFP64:$src1, (loadf64 addr:$src2)))]>; -def _Fp64m32: FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, f32mem:$src2), OneArgFPRW, +def _Fp64m32: FpIf64<(outs RFP64:$dst), + (ins RFP64:$src1, f32mem:$src2), OneArgFPRW, [(set RFP64:$dst, (OpNode RFP64:$src1, (f64 (extloadf32 addr:$src2))))]>; -def _Fp80m32: FpI_<(outs RFP80:$dst), (ins RFP80:$src1, f32mem:$src2), OneArgFPRW, +def _Fp80m32: FpI_<(outs RFP80:$dst), + (ins RFP80:$src1, f32mem:$src2), OneArgFPRW, [(set RFP80:$dst, (OpNode RFP80:$src1, (f80 (extloadf32 addr:$src2))))]>; -def _Fp80m64: FpI_<(outs RFP80:$dst), (ins RFP80:$src1, f64mem:$src2), OneArgFPRW, +def _Fp80m64: FpI_<(outs RFP80:$dst), + (ins RFP80:$src1, f64mem:$src2), OneArgFPRW, [(set RFP80:$dst, (OpNode RFP80:$src1, (f80 (extloadf64 addr:$src2))))]>; def _F32m : FPI<0xD8, fp, (outs), (ins f32mem:$src), - !strconcat("f", !strconcat(asmstring, "{s}\t$src"))> { let mayLoad = 1; } + !strconcat("f", !strconcat(asmstring, "{s}\t$src"))> { + let mayLoad = 1; +} def _F64m : FPI<0xDC, fp, (outs), (ins f64mem:$src), - !strconcat("f", !strconcat(asmstring, "{l}\t$src"))> { let mayLoad = 1; } + !strconcat("f", !strconcat(asmstring, "{l}\t$src"))> { + let mayLoad = 1; +} // ST(0) = ST(0) + [memint] -def _FpI16m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i16mem:$src2), OneArgFPRW, +def _FpI16m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i16mem:$src2), + OneArgFPRW, [(set RFP32:$dst, (OpNode RFP32:$src1, (X86fild addr:$src2, i16)))]>; -def _FpI32m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i32mem:$src2), OneArgFPRW, +def _FpI32m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i32mem:$src2), + OneArgFPRW, [(set RFP32:$dst, (OpNode RFP32:$src1, (X86fild addr:$src2, i32)))]>; -def _FpI16m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i16mem:$src2), OneArgFPRW, +def _FpI16m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i16mem:$src2), + OneArgFPRW, [(set RFP64:$dst, (OpNode RFP64:$src1, (X86fild addr:$src2, i16)))]>; -def _FpI32m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i32mem:$src2), OneArgFPRW, +def _FpI32m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i32mem:$src2), + OneArgFPRW, [(set RFP64:$dst, (OpNode RFP64:$src1, (X86fild addr:$src2, i32)))]>; -def _FpI16m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i16mem:$src2), OneArgFPRW, +def _FpI16m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i16mem:$src2), + OneArgFPRW, [(set RFP80:$dst, (OpNode RFP80:$src1, (X86fild addr:$src2, i16)))]>; -def _FpI32m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i32mem:$src2), OneArgFPRW, +def _FpI32m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i32mem:$src2), + OneArgFPRW, [(set RFP80:$dst, (OpNode RFP80:$src1, (X86fild addr:$src2, i32)))]>; def _FI16m : FPI<0xDE, fp, (outs), (ins i16mem:$src), - !strconcat("fi", !strconcat(asmstring, "{s}\t$src"))> { let mayLoad = 1; } + !strconcat("fi", !strconcat(asmstring, "{s}\t$src"))> { + let mayLoad = 1; +} def _FI32m : FPI<0xDA, fp, (outs), (ins i32mem:$src), - !strconcat("fi", !strconcat(asmstring, "{l}\t$src"))> { let 
mayLoad = 1; } + !strconcat("fi", !strconcat(asmstring, "{l}\t$src"))> { + let mayLoad = 1; +} } defm ADD : FPBinary_rr<fadd>; @@ -279,6 +298,9 @@ def DIV_FST0r : FPST0rInst <0xF0, "fdiv\t$op">; def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, %ST(0)}">; def DIVR_FPrST0 : FPrST0PInst<0xF0, "fdiv{|r}p\t$op">; +def COM_FST0r : FPST0rInst <0xD0, "fcom\t$op">; +def COMP_FST0r : FPST0rInst <0xD8, "fcomp\t$op">; + // Unary operations. multiclass FPUnary<SDNode OpNode, bits<8> opcode, string asmstring> { def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src), OneArgFPRW, @@ -305,22 +327,22 @@ def TST_F : FPI<0xE4, RawFrm, (outs), (ins), "ftst">, D9; // Versions of FP instructions that take a single memory operand. Added for the // disassembler; remove as they are included with patterns elsewhere. -def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom\t$src">; -def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp\t$src">; +def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom{l}\t$src">; +def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp{l}\t$src">; def FLDENVm : FPI<0xD9, MRM4m, (outs), (ins f32mem:$src), "fldenv\t$src">; -def FSTENVm : FPI<0xD9, MRM6m, (outs f32mem:$dst), (ins), "fstenv\t$dst">; +def FSTENVm : FPI<0xD9, MRM6m, (outs f32mem:$dst), (ins), "fnstenv\t$dst">; def FICOM32m : FPI<0xDA, MRM2m, (outs), (ins i32mem:$src), "ficom{l}\t$src">; def FICOMP32m: FPI<0xDA, MRM3m, (outs), (ins i32mem:$src), "ficomp{l}\t$src">; -def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom\t$src">; -def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp\t$src">; +def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom{ll}\t$src">; +def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp{ll}\t$src">; def FISTTP32m: FPI<0xDD, MRM1m, (outs i32mem:$dst), (ins), "fisttp{l}\t$dst">; def FRSTORm : FPI<0xDD, MRM4m, (outs f32mem:$dst), (ins), "frstor\t$dst">; -def FSAVEm : FPI<0xDD, MRM6m, (outs f32mem:$dst), (ins), "fsave\t$dst">; -def FSTSWm : FPI<0xDD, MRM7m, (outs f32mem:$dst), (ins), "fstsw\t$dst">; +def FSAVEm : FPI<0xDD, MRM6m, (outs f32mem:$dst), (ins), "fnsave\t$dst">; +def FNSTSWm : FPI<0xDD, MRM7m, (outs f32mem:$dst), (ins), "fnstsw\t$dst">; def FICOM16m : FPI<0xDE, MRM2m, (outs), (ins i16mem:$src), "ficom{w}\t$src">; def FICOMP16m: FPI<0xDE, MRM3m, (outs), (ins i16mem:$src), "ficomp{w}\t$src">; @@ -493,7 +515,8 @@ def ISTT_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP, let mayStore = 1 in { def ISTT_FP16m : FPI<0xDF, MRM1m, (outs), (ins i16mem:$dst), "fisttp{s}\t$dst">; def ISTT_FP32m : FPI<0xDB, MRM1m, (outs), (ins i32mem:$dst), "fisttp{l}\t$dst">; -def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst), "fisttp{ll}\t$dst">; +def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst), + "fisttp{ll}\t$dst">; } // FP Stack manipulation instructions. @@ -561,10 +584,15 @@ def UCOM_FIPr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop "fucomip\t{$reg, %st(0)|%ST(0), $reg}">, DF; } +def COM_FIr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg), + "fcomi\t{$reg, %st(0)|%ST(0), $reg}">, DB; +def COM_FIPr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg), + "fcomip\t{$reg, %st(0)|%ST(0), $reg}">, DF; + // Floating point flag ops. 
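Of these flag ops, FNSTSW has only one encodable register form, targeting AX (hence the explicit %ax printed by FNSTSW8r below). A hedged sketch, assuming GCC/Clang extended inline asm, of reading the status word that way:

    #include <cstdint>

    static inline uint16_t x87_status_word() {
      uint16_t sw;
      // "=a" pins the output to AX, matching FNSTSW's one register form.
      __asm__ __volatile__("fnstsw %0" : "=a"(sw));
      return sw;
    }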
let Defs = [AX] in def FNSTSW8r : I<0xE0, RawFrm, // AX = fp flags - (outs), (ins), "fnstsw", []>, DF; + (outs), (ins), "fnstsw %ax", []>, DF; def FNSTCW16m : I<0xD9, MRM7m, // [mem16] = X87 control word (outs), (ins i16mem:$dst), "fnstcw\t$dst", @@ -574,6 +602,44 @@ let mayLoad = 1 in def FLDCW16m : I<0xD9, MRM5m, // X87 control word = [mem16] (outs), (ins i16mem:$dst), "fldcw\t$dst", []>; +// Register free + +def FFREE : FPI<0xC0, AddRegFrm, (outs), (ins RST:$reg), + "ffree\t$reg">, DD; + +// Clear exceptions + +def FNCLEX : I<0xE2, RawFrm, (outs), (ins), "fnclex", []>, DB; + +// Operandless floating-point instructions for the disassembler + +def FNOP : I<0xD0, RawFrm, (outs), (ins), "fnop", []>, D9; +def FXAM : I<0xE5, RawFrm, (outs), (ins), "fxam", []>, D9; +def FLDL2T : I<0xE9, RawFrm, (outs), (ins), "fldl2t", []>, D9; +def FLDL2E : I<0xEA, RawFrm, (outs), (ins), "fldl2e", []>, D9; +def FLDPI : I<0xEB, RawFrm, (outs), (ins), "fldpi", []>, D9; +def FLDLG2 : I<0xEC, RawFrm, (outs), (ins), "fldlg2", []>, D9; +def FLDLN2 : I<0xED, RawFrm, (outs), (ins), "fldln2", []>, D9; +def F2XM1 : I<0xF0, RawFrm, (outs), (ins), "f2xm1", []>, D9; +def FYL2X : I<0xF1, RawFrm, (outs), (ins), "fyl2x", []>, D9; +def FPTAN : I<0xF2, RawFrm, (outs), (ins), "fptan", []>, D9; +def FPATAN : I<0xF3, RawFrm, (outs), (ins), "fpatan", []>, D9; +def FXTRACT : I<0xF4, RawFrm, (outs), (ins), "fxtract", []>, D9; +def FPREM1 : I<0xF5, RawFrm, (outs), (ins), "fprem1", []>, D9; +def FDECSTP : I<0xF6, RawFrm, (outs), (ins), "fdecstp", []>, D9; +def FINCSTP : I<0xF7, RawFrm, (outs), (ins), "fincstp", []>, D9; +def FPREM : I<0xF8, RawFrm, (outs), (ins), "fprem", []>, D9; +def FYL2XP1 : I<0xF9, RawFrm, (outs), (ins), "fyl2xp1", []>, D9; +def FSINCOS : I<0xFB, RawFrm, (outs), (ins), "fsincos", []>, D9; +def FRNDINT : I<0xFC, RawFrm, (outs), (ins), "frndint", []>, D9; +def FSCALE : I<0xFD, RawFrm, (outs), (ins), "fscale", []>, D9; +def FCOMPP : I<0xD9, RawFrm, (outs), (ins), "fcompp", []>, DE; + +def FXSAVE : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins), + "fxsave\t$dst", []>, TB; +def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src), + "fxrstor\t$src", []>, TB; + //===----------------------------------------------------------------------===// // Non-Instruction Patterns //===----------------------------------------------------------------------===// @@ -585,11 +651,15 @@ def : Pat<(X86fld addr:$src, f80), (LD_Fp80m addr:$src)>; // Required for CALLs that return f32 / f64 / f80 values. 
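A minimal example of the case the patterns below cover, assuming the SysV ABI (where long double is returned in ST(0)): storing a call's f80 result selects the store-and-pop form, i.e. fstpt (ST_FpP80m).

    long double source() { return 2.718281828L; }

    // Expected: call source, then fstpt through the out-pointer.
    void capture(long double *out) {
      *out = source();
    }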
def : Pat<(X86fst RFP32:$src, addr:$op, f32), (ST_Fp32m addr:$op, RFP32:$src)>; -def : Pat<(X86fst RFP64:$src, addr:$op, f32), (ST_Fp64m32 addr:$op, RFP64:$src)>; +def : Pat<(X86fst RFP64:$src, addr:$op, f32), (ST_Fp64m32 addr:$op, + RFP64:$src)>; def : Pat<(X86fst RFP64:$src, addr:$op, f64), (ST_Fp64m addr:$op, RFP64:$src)>; -def : Pat<(X86fst RFP80:$src, addr:$op, f32), (ST_Fp80m32 addr:$op, RFP80:$src)>; -def : Pat<(X86fst RFP80:$src, addr:$op, f64), (ST_Fp80m64 addr:$op, RFP80:$src)>; -def : Pat<(X86fst RFP80:$src, addr:$op, f80), (ST_FpP80m addr:$op, RFP80:$src)>; +def : Pat<(X86fst RFP80:$src, addr:$op, f32), (ST_Fp80m32 addr:$op, + RFP80:$src)>; +def : Pat<(X86fst RFP80:$src, addr:$op, f64), (ST_Fp80m64 addr:$op, + RFP80:$src)>; +def : Pat<(X86fst RFP80:$src, addr:$op, f80), (ST_FpP80m addr:$op, + RFP80:$src)>; // Floating point constant -0.0 and -1.0 def : Pat<(f32 fpimmneg0), (CHS_Fp32 (LD_Fp032))>, Requires<[FPStackf32]>; diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td index 2f14bb0..a799f16 100644 --- a/lib/Target/X86/X86InstrFormats.td +++ b/lib/Target/X86/X86InstrFormats.td @@ -115,17 +115,20 @@ class I<bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern> let Pattern = pattern; let CodeSize = 3; } -class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern> +class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm, + list<dag> pattern> : X86Inst<o, f, Imm8 , outs, ins, asm> { let Pattern = pattern; let CodeSize = 3; } -class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern> +class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm, + list<dag> pattern> : X86Inst<o, f, Imm16, outs, ins, asm> { let Pattern = pattern; let CodeSize = 3; } -class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern> +class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm, + list<dag> pattern> : X86Inst<o, f, Imm32, outs, ins, asm> { let Pattern = pattern; let CodeSize = 3; @@ -169,7 +172,8 @@ class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm, class SSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>; -class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>; class PSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasSSE1]>; @@ -205,9 +209,11 @@ class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm, // S3SI - SSE3 instructions with XS prefix. // S3DI - SSE3 instructions with XD prefix. 
-class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE3]>; -class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE3]>; class S3I<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, OpSize, Requires<[HasSSE3]>; @@ -255,7 +261,7 @@ class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm, // SS42AI = SSE 4.2 instructions with TA prefix class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern> + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TA, Requires<[HasSSE42]>; // X86-64 Instruction templates... @@ -297,17 +303,24 @@ class RPDI<bits<8> o, Format F, dag outs, dag ins, string asm, // MMXIi8 - MMX instructions with ImmT == Imm8 and TB prefix. // MMXID - MMX instructions with XD prefix. // MMXIS - MMX instructions with XS prefix. -class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX]>; -class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX,In64BitMode]>; -class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, REX_W, Requires<[HasMMX]>; -class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, OpSize, Requires<[HasMMX]>; -class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : Ii8<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX]>; -class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : Ii8<o, F, outs, ins, asm, pattern>, XD, Requires<[HasMMX]>; -class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasMMX]>; diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 1947d35..e555cd1 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -1018,13 +1018,11 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB, switch (Opc) { default: break; case X86::MOV8r0: - case X86::MOV16r0: case X86::MOV32r0: { if (!isSafeToClobberEFLAGS(MBB, I)) { switch (Opc) { default: break; case X86::MOV8r0: Opc = X86::MOV8ri; break; - case X86::MOV16r0: Opc = X86::MOV16ri; break; case X86::MOV32r0: Opc = X86::MOV32ri; break; } Clone = false; @@ -1880,7 +1878,7 @@ bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB, if (SrcReg != X86::EFLAGS) return false; if (DestRC == 
&X86::GR64RegClass || DestRC == &X86::GR64_NOSPRegClass) { - BuildMI(MBB, MI, DL, get(X86::PUSHFQ)); + BuildMI(MBB, MI, DL, get(X86::PUSHFQ64)); BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg); return true; } else if (DestRC == &X86::GR32RegClass || @@ -2292,9 +2290,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, OpcodeTablePtr = &RegOp2MemOpTable2Addr; isTwoAddrFold = true; } else if (i == 0) { // If operand 0 - if (MI->getOpcode() == X86::MOV16r0) - NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI); - else if (MI->getOpcode() == X86::MOV32r0) + if (MI->getOpcode() == X86::MOV32r0) NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI); else if (MI->getOpcode() == X86::MOV8r0) NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI); @@ -2370,6 +2366,23 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Check switch flag if (NoFusing) return NULL; + if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize)) + switch (MI->getOpcode()) { + case X86::CVTSD2SSrr: + case X86::Int_CVTSD2SSrr: + case X86::CVTSS2SDrr: + case X86::Int_CVTSS2SDrr: + case X86::RCPSSr: + case X86::RCPSSr_Int: + case X86::ROUNDSDr_Int: + case X86::ROUNDSSr_Int: + case X86::RSQRTSSr: + case X86::RSQRTSSr_Int: + case X86::SQRTSSr: + case X86::SQRTSSr_Int: + return 0; + } + const MachineFrameInfo *MFI = MF.getFrameInfo(); unsigned Size = MFI->getObjectSize(FrameIndex); unsigned Alignment = MFI->getObjectAlignment(FrameIndex); @@ -2405,6 +2418,23 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Check switch flag if (NoFusing) return NULL; + if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize)) + switch (MI->getOpcode()) { + case X86::CVTSD2SSrr: + case X86::Int_CVTSD2SSrr: + case X86::CVTSS2SDrr: + case X86::Int_CVTSS2SDrr: + case X86::RCPSSr: + case X86::RCPSSr_Int: + case X86::ROUNDSDr_Int: + case X86::ROUNDSSr_Int: + case X86::RSQRTSSr: + case X86::RSQRTSSr_Int: + case X86::SQRTSSr: + case X86::SQRTSSr_Int: + return 0; + } + // Determine the alignment of the load. 
unsigned Alignment = 0; if (LoadMI->hasOneMemOperand()) @@ -2529,7 +2559,6 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI, } else if (OpNum == 0) { // If operand 0 switch (Opc) { case X86::MOV8r0: - case X86::MOV16r0: case X86::MOV32r0: return true; default: break; @@ -2558,7 +2587,6 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, MemOp2RegOpTable.find((unsigned*)MI->getOpcode()); if (I == MemOp2RegOpTable.end()) return false; - DebugLoc dl = MI->getDebugLoc(); unsigned Opc = I->second.first; unsigned Index = I->second.second & 0xf; bool FoldedLoad = I->second.second & (1 << 4); diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 3cc1853..4d922a5 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -1,4 +1,4 @@ -//===- X86InstrInfo.td - Describe the X86 Instruction Set --*- tablegen -*-===// + // // The LLVM Compiler Infrastructure // @@ -41,6 +41,9 @@ def SDTX86BrCond : SDTypeProfile<0, 3, def SDTX86SetCC : SDTypeProfile<1, 2, [SDTCisVT<0, i8>, SDTCisVT<1, i8>, SDTCisVT<2, i32>]>; +def SDTX86SetCC_C : SDTypeProfile<1, 2, + [SDTCisInt<0>, + SDTCisVT<1, i8>, SDTCisVT<2, i32>]>; def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>, SDTCisVT<2, i8>]>; @@ -87,7 +90,7 @@ def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>; def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond, [SDNPHasChain]>; def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>; -def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC>; +def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>; def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas, [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore, @@ -196,6 +199,12 @@ class X86MemOperand<string printMethod> : Operand<iPTR> { def opaque32mem : X86MemOperand<"printopaquemem">; def opaque48mem : X86MemOperand<"printopaquemem">; def opaque80mem : X86MemOperand<"printopaquemem">; +def opaque512mem : X86MemOperand<"printopaquemem">; + +def offset8 : Operand<i64> { let PrintMethod = "print_pcrel_imm"; } +def offset16 : Operand<i64> { let PrintMethod = "print_pcrel_imm"; } +def offset32 : Operand<i64> { let PrintMethod = "print_pcrel_imm"; } +def offset64 : Operand<i64> { let PrintMethod = "print_pcrel_imm"; } def i8mem : X86MemOperand<"printi8mem">; def i16mem : X86MemOperand<"printi16mem">; @@ -289,6 +298,7 @@ def FarData : Predicate<"TM.getCodeModel() != CodeModel::Small &&" def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||" "TM.getCodeModel() == CodeModel::Kernel">; def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">; +def OptForSize : Predicate<"OptForSize">; def OptForSpeed : Predicate<"!OptForSize">; def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">; def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">; @@ -351,7 +361,8 @@ def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{ return false; }]>; -def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{ +def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), +[{ LoadSDNode *LD = cast<LoadSDNode>(N); if (const Value *Src = LD->getSrcValue()) if (const PointerType *PT = dyn_cast<PointerType>(Src->getType())) @@ -539,13 +550,17 @@ def VASTART_SAVE_XMM_REGS : I<0, Pseudo, // Nop let neverHasSideEffects = 1 in { def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>; + def NOOPW : I<0x1f, MRM0m, (outs), (ins i16mem:$zero), + "nop{w}\t$zero", []>, TB, OpSize; def NOOPL : I<0x1f, 
MRM0m, (outs), (ins i32mem:$zero), - "nopl\t$zero", []>, TB; + "nop{l}\t$zero", []>, TB; } // Trap def INT3 : I<0xcc, RawFrm, (outs), (ins), "int\t3", []>; def INT : I<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap", []>; +def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", []>, OpSize; +def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l}", []>; // PIC base construction. This expands to code that looks like this: // call $next_inst @@ -709,12 +724,14 @@ def ENTER : I<0xC8, RawFrm, (outs), (ins i16imm:$len, i8imm:$lvl), // Tail call stuff. let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in -def TCRETURNdi : I<0, Pseudo, (outs), (ins i32imm:$dst, i32imm:$offset, variable_ops), +def TCRETURNdi : I<0, Pseudo, (outs), + (ins i32imm:$dst, i32imm:$offset, variable_ops), "#TC_RETURN $dst $offset", []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in -def TCRETURNri : I<0, Pseudo, (outs), (ins GR32:$dst, i32imm:$offset, variable_ops), +def TCRETURNri : I<0, Pseudo, (outs), + (ins GR32:$dst, i32imm:$offset, variable_ops), "#TC_RETURN $dst $offset", []>; @@ -722,7 +739,8 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in def TAILJMPd : IBr<0xE9, (ins i32imm_pcrel:$dst), "jmp\t$dst # TAILCALL", []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in - def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst # TAILCALL", + def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst), + "jmp{l}\t{*}$dst # TAILCALL", []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), @@ -735,6 +753,15 @@ let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>; +def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), + "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS; +def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), + "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS; +def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), + "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS; +def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), + "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS; + let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in { let mayLoad = 1 in { def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>, @@ -770,10 +797,14 @@ def PUSH32i32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm), "push{l}\t$imm", []>; } -let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, neverHasSideEffects=1 in -def POPFD : I<0x9D, RawFrm, (outs), (ins), "popf", []>; -let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in -def PUSHFD : I<0x9C, RawFrm, (outs), (ins), "pushf", []>; +let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, neverHasSideEffects=1 in { +def POPF : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize; +def POPFD : I<0x9D, RawFrm, (outs), (ins), "popf{l}", []>; +} +let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in { +def PUSHF : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize; +def PUSHFD : I<0x9C, RawFrm, (outs), (ins), "pushf{l}", []>; +} let isTwoAddress = 1 in // GR32 = bswap GR32 def BSWAP32r : I<0xC8, AddRegFrm, @@ -915,6 +946,13 @@ let Uses = [EAX] in def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port), "out{l}\t{%eax, $port|$port, %EAX}", []>; +def IN8 : I<0x6C, RawFrm, (outs), (ins), + "ins{b}", []>; +def 
IN16 : I<0x6D, RawFrm, (outs), (ins), + "ins{w}", []>, OpSize; +def IN32 : I<0x6D, RawFrm, (outs), (ins), + "ins{l}", []>; + //===----------------------------------------------------------------------===// // Move Instructions... // @@ -947,18 +985,18 @@ def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src), "mov{l}\t{$src, $dst|$dst, $src}", [(store (i32 imm:$src), addr:$dst)]>; -def MOV8o8a : Ii8 <0xA0, RawFrm, (outs), (ins i8imm:$src), +def MOV8o8a : Ii8 <0xA0, RawFrm, (outs), (ins offset8:$src), "mov{b}\t{$src, %al|%al, $src}", []>; -def MOV16o16a : Ii16 <0xA1, RawFrm, (outs), (ins i16imm:$src), +def MOV16o16a : Ii16 <0xA1, RawFrm, (outs), (ins offset16:$src), "mov{w}\t{$src, %ax|%ax, $src}", []>, OpSize; -def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins i32imm:$src), +def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src), "mov{l}\t{$src, %eax|%eax, $src}", []>; -def MOV8ao8 : Ii8 <0xA2, RawFrm, (outs i8imm:$dst), (ins), +def MOV8ao8 : Ii8 <0xA2, RawFrm, (outs offset8:$dst), (ins), "mov{b}\t{%al, $dst|$dst, %al}", []>; -def MOV16ao16 : Ii16 <0xA3, RawFrm, (outs i16imm:$dst), (ins), +def MOV16ao16 : Ii16 <0xA3, RawFrm, (outs offset16:$dst), (ins), "mov{w}\t{%ax, $dst|$dst, %ax}", []>, OpSize; -def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs i32imm:$dst), (ins), +def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins), "mov{l}\t{%eax, $dst|$dst, %eax}", []>; // Moves to and from segment registers @@ -971,6 +1009,13 @@ def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src), def MOV16sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i16mem:$src), "mov{w}\t{$src, $dst|$dst, $src}", []>; +def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src), + "mov{b}\t{$src, $dst|$dst, $src}", []>; +def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), + "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize; +def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>; + let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in { def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src), "mov{b}\t{$src, $dst|$dst, $src}", @@ -1010,6 +1055,18 @@ def MOV8rm_NOREX : I<0x8A, MRMSrcMem, (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src), "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>; +// Moves to and from debug registers +def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>, TB; +def MOV32dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR32:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>, TB; + +// Moves to and from control registers +def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG_32:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>, TB; +def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_32:$dst), (ins GR32:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>, TB; + //===----------------------------------------------------------------------===// // Fixed-Register Multiplication and Division Instructions... 
// @@ -1071,7 +1128,7 @@ def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src), let Defs = [AX,DX,EFLAGS], Uses = [AX] in def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src), "imul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16] -let Defs = [EAX,EDX], Uses = [EAX] in +let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src), "imul{l}\t$src", []>; // EAX,EDX = EAX*[mem32] } @@ -1079,45 +1136,47 @@ def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src), // unsigned division/remainder let Defs = [AL,AH,EFLAGS], Uses = [AX] in -def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH +def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH "div{b}\t$src", []>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in -def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX +def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX "div{w}\t$src", []>, OpSize; let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in -def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX +def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX "div{l}\t$src", []>; let mayLoad = 1 in { let Defs = [AL,AH,EFLAGS], Uses = [AX] in -def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH +def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH "div{b}\t$src", []>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in -def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX +def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX "div{w}\t$src", []>, OpSize; let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in -def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX + // EDX:EAX/[mem32] = EAX,EDX +def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src), "div{l}\t$src", []>; } // Signed division/remainder. 
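Signed division pins its operands: IDIV reads the dividend from fixed registers and leaves the quotient in *AX and the remainder in *DX, as the per-def comments below spell out. A small sketch (assuming GCC/Clang at -O2 on x86) of why that matters: a / and % pair over the same operands can typically share a single idiv.

    #include <cstdint>

    void divmod(int32_t a, int32_t b, int32_t *q, int32_t *r) {
      *q = a / b; // one cltd; idivl computes both results at once...
      *r = a % b; // ...so the remainder is read from EDX, no second divide
    }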
let Defs = [AL,AH,EFLAGS], Uses = [AX] in -def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH +def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH "idiv{b}\t$src", []>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in -def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX +def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX "idiv{w}\t$src", []>, OpSize; let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in -def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX +def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX "idiv{l}\t$src", []>; let mayLoad = 1 in { let Defs = [AL,AH,EFLAGS], Uses = [AX] in -def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH +def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH "idiv{b}\t$src", []>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in -def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX +def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX "idiv{w}\t$src", []>, OpSize; let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in -def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX +def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src), + // EDX:EAX/[mem32] = EAX,EDX "idiv{l}\t$src", []>; } @@ -1145,193 +1204,193 @@ def CMOV_GR8 : I<0, Pseudo, let isCommutable = 1 in { def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_B, EFLAGS))]>, TB, OpSize; def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_B, EFLAGS))]>, TB; def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_AE, EFLAGS))]>, TB, OpSize; def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_AE, EFLAGS))]>, TB; def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_E, EFLAGS))]>, TB, OpSize; def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_E, EFLAGS))]>, TB; def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_NE, EFLAGS))]>, TB, OpSize; def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, 
(X86cmov GR32:$src1, GR32:$src2, X86_COND_NE, EFLAGS))]>, TB; def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_BE, EFLAGS))]>, TB, OpSize; def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_BE, EFLAGS))]>, TB; def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_A, EFLAGS))]>, TB, OpSize; def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_A, EFLAGS))]>, TB; def CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_L, EFLAGS))]>, TB, OpSize; def CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_L, EFLAGS))]>, TB; def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_GE, EFLAGS))]>, TB, OpSize; def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_GE, EFLAGS))]>, TB; def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_LE, EFLAGS))]>, TB, OpSize; def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_LE, EFLAGS))]>, TB; def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_G, EFLAGS))]>, TB, OpSize; def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_G, EFLAGS))]>, TB; def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_S, EFLAGS))]>, TB, OpSize; 
def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_S, EFLAGS))]>, TB; def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_NS, EFLAGS))]>, TB, OpSize; def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_NS, EFLAGS))]>, TB; def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_P, EFLAGS))]>, TB, OpSize; def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_P, EFLAGS))]>, TB; def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_NP, EFLAGS))]>, TB, OpSize; def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_NP, EFLAGS))]>, TB; def CMOVO16rr : I<0x40, MRMSrcReg, // if overflow, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_O, EFLAGS))]>, TB, OpSize; def CMOVO32rr : I<0x40, MRMSrcReg, // if overflow, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_O, EFLAGS))]>, TB; def CMOVNO16rr : I<0x41, MRMSrcReg, // if !overflow, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_NO, EFLAGS))]>, TB, OpSize; def CMOVNO32rr : I<0x41, MRMSrcReg, // if !overflow, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_NO, EFLAGS))]>, TB; @@ -1339,193 +1398,193 @@ def CMOVNO32rr : I<0x41, MRMSrcReg, // if !overflow, GR32 = GR32 def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_B, EFLAGS))]>, TB, OpSize; def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{l}\t{$src2, 
$dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_B, EFLAGS))]>, TB; def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_AE, EFLAGS))]>, TB, OpSize; def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_AE, EFLAGS))]>, TB; def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_E, EFLAGS))]>, TB, OpSize; def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_E, EFLAGS))]>, TB; def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_NE, EFLAGS))]>, TB, OpSize; def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NE, EFLAGS))]>, TB; def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_BE, EFLAGS))]>, TB, OpSize; def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_BE, EFLAGS))]>, TB; def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_A, EFLAGS))]>, TB, OpSize; def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_A, EFLAGS))]>, TB; def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_L, EFLAGS))]>, TB, OpSize; def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_L, EFLAGS))]>, TB; def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = 
[mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_GE, EFLAGS))]>, TB, OpSize; def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_GE, EFLAGS))]>, TB; def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_LE, EFLAGS))]>, TB, OpSize; def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_LE, EFLAGS))]>, TB; def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_G, EFLAGS))]>, TB, OpSize; def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_G, EFLAGS))]>, TB; def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_S, EFLAGS))]>, TB, OpSize; def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_S, EFLAGS))]>, TB; def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_NS, EFLAGS))]>, TB, OpSize; def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NS, EFLAGS))]>, TB; def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_P, EFLAGS))]>, TB, OpSize; def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_P, EFLAGS))]>, TB; def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{w}\t{$src2, $dst|$dst, 
$src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_NP, EFLAGS))]>, TB, OpSize; def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NP, EFLAGS))]>, TB; def CMOVO16rm : I<0x40, MRMSrcMem, // if overflow, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_O, EFLAGS))]>, TB, OpSize; def CMOVO32rm : I<0x40, MRMSrcMem, // if overflow, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_O, EFLAGS))]>, TB; def CMOVNO16rm : I<0x41, MRMSrcMem, // if !overflow, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_NO, EFLAGS))]>, TB, OpSize; def CMOVNO32rm : I<0x41, MRMSrcMem, // if !overflow, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NO, EFLAGS))]>, TB; @@ -1583,11 +1642,13 @@ def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src), "inc{b}\t$dst", [(set GR8:$dst, (add GR8:$src, 1)), (implicit EFLAGS)]>; let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA. -def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst", +def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), + "inc{w}\t$dst", [(set GR16:$dst, (add GR16:$src, 1)), (implicit EFLAGS)]>, OpSize, Requires<[In32BitMode]>; -def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst", +def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), + "inc{l}\t$dst", [(set GR32:$dst, (add GR32:$src, 1)), (implicit EFLAGS)]>, Requires<[In32BitMode]>; } @@ -1610,11 +1671,13 @@ def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src), "dec{b}\t$dst", [(set GR8:$dst, (add GR8:$src, -1)), (implicit EFLAGS)]>; let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA. -def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst", +def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), + "dec{w}\t$dst", [(set GR16:$dst, (add GR16:$src, -1)), (implicit EFLAGS)]>, OpSize, Requires<[In32BitMode]>; -def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst", +def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), + "dec{l}\t$dst", [(set GR32:$dst, (add GR32:$src, -1)), (implicit EFLAGS)]>, Requires<[In32BitMode]>; } @@ -1654,6 +1717,17 @@ def AND32rr : I<0x21, MRMDestReg, (implicit EFLAGS)]>; } +// AND instructions with the destination register in REG and the source register +// in R/M. Included for the disassembler. 
+def AND8rr_REV : I<0x22, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "and{b}\t{$src2, $dst|$dst, $src2}", []>; +def AND16rr_REV : I<0x23, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "and{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def AND32rr_REV : I<0x23, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "and{l}\t{$src2, $dst|$dst, $src2}", []>; + def AND8rm : I<0x22, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2), "and{b}\t{$src2, $dst|$dst, $src2}", @@ -1753,50 +1827,73 @@ let isTwoAddress = 0 in { let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y -def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2), +def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst), + (ins GR8 :$src1, GR8 :$src2), "or{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (or GR8:$src1, GR8:$src2)), (implicit EFLAGS)]>; -def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), +def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), "or{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (or GR16:$src1, GR16:$src2)), (implicit EFLAGS)]>, OpSize; -def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), +def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), "or{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (or GR32:$src1, GR32:$src2)), (implicit EFLAGS)]>; } -def OR8rm : I<0x0A, MRMSrcMem , (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2), + +// OR instructions with the destination register in REG and the source register +// in R/M. Included for the disassembler. +def OR8rr_REV : I<0x0A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "or{b}\t{$src2, $dst|$dst, $src2}", []>; +def OR16rr_REV : I<0x0B, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "or{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def OR32rr_REV : I<0x0B, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "or{l}\t{$src2, $dst|$dst, $src2}", []>; + +def OR8rm : I<0x0A, MRMSrcMem , (outs GR8 :$dst), + (ins GR8 :$src1, i8mem :$src2), "or{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (or GR8:$src1, (load addr:$src2))), (implicit EFLAGS)]>; -def OR16rm : I<0x0B, MRMSrcMem , (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), +def OR16rm : I<0x0B, MRMSrcMem , (outs GR16:$dst), + (ins GR16:$src1, i16mem:$src2), "or{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (or GR16:$src1, (load addr:$src2))), (implicit EFLAGS)]>, OpSize; -def OR32rm : I<0x0B, MRMSrcMem , (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), +def OR32rm : I<0x0B, MRMSrcMem , (outs GR32:$dst), + (ins GR32:$src1, i32mem:$src2), "or{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (or GR32:$src1, (load addr:$src2))), (implicit EFLAGS)]>; -def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), +def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst), + (ins GR8 :$src1, i8imm:$src2), "or{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (or GR8:$src1, imm:$src2)), (implicit EFLAGS)]>; -def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2), +def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst), + (ins GR16:$src1, i16imm:$src2), "or{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (or GR16:$src1, imm:$src2)), (implicit EFLAGS)]>, OpSize; -def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2), +def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst), + (ins GR32:$src1, i32imm:$src2), "or{l}\t{$src2, $dst|$dst, $src2}", 
[(set GR32:$dst, (or GR32:$src1, imm:$src2)), (implicit EFLAGS)]>; -def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2), +def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst), + (ins GR16:$src1, i16i8imm:$src2), "or{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (or GR16:$src1, i16immSExt8:$src2)), (implicit EFLAGS)]>, OpSize; -def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2), +def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst), + (ins GR32:$src1, i32i8imm:$src2), "or{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (or GR32:$src1, i32immSExt8:$src2)), (implicit EFLAGS)]>; @@ -1863,6 +1960,17 @@ let isCommutable = 1 in { // X = XOR Y, Z --> X = XOR Z, Y (implicit EFLAGS)]>; } // isCommutable = 1 +// XOR instructions with the destination register in REG and the source register +// in R/M. Included for the disassembler. +def XOR8rr_REV : I<0x32, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "xor{b}\t{$src2, $dst|$dst, $src2}", []>; +def XOR16rr_REV : I<0x33, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "xor{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def XOR32rr_REV : I<0x33, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "xor{l}\t{$src2, $dst|$dst, $src2}", []>; + def XOR8rm : I<0x32, MRMSrcMem , (outs GR8 :$dst), (ins GR8:$src1, i8mem :$src2), "xor{b}\t{$src2, $dst|$dst, $src2}", @@ -2202,7 +2310,8 @@ def RCL16mCL : I<0xD3, MRM2m, (outs i16mem:$dst), (ins i16mem:$src), } def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src, i8imm:$cnt), "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize; -def RCL16mi : Ii8<0xC1, MRM2m, (outs i16mem:$dst), (ins i16mem:$src, i8imm:$cnt), +def RCL16mi : Ii8<0xC1, MRM2m, (outs i16mem:$dst), + (ins i16mem:$src, i8imm:$cnt), "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize; def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src), @@ -2217,7 +2326,8 @@ def RCL32mCL : I<0xD3, MRM2m, (outs i32mem:$dst), (ins i32mem:$src), } def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src, i8imm:$cnt), "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>; -def RCL32mi : Ii8<0xC1, MRM2m, (outs i32mem:$dst), (ins i32mem:$src, i8imm:$cnt), +def RCL32mi : Ii8<0xC1, MRM2m, (outs i32mem:$dst), + (ins i32mem:$src, i8imm:$cnt), "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>; def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src), @@ -2247,7 +2357,8 @@ def RCR16mCL : I<0xD3, MRM3m, (outs i16mem:$dst), (ins i16mem:$src), } def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src, i8imm:$cnt), "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize; -def RCR16mi : Ii8<0xC1, MRM3m, (outs i16mem:$dst), (ins i16mem:$src, i8imm:$cnt), +def RCR16mi : Ii8<0xC1, MRM3m, (outs i16mem:$dst), + (ins i16mem:$src, i8imm:$cnt), "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize; def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src), @@ -2262,7 +2373,8 @@ def RCR32mCL : I<0xD3, MRM3m, (outs i32mem:$dst), (ins i32mem:$src), } def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src, i8imm:$cnt), "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>; -def RCR32mi : Ii8<0xC1, MRM3m, (outs i32mem:$dst), (ins i32mem:$src, i8imm:$cnt), +def RCR32mi : Ii8<0xC1, MRM3m, (outs i32mem:$dst), + (ins i32mem:$src, i8imm:$cnt), "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>; // FIXME: provide shorter instructions when imm8 == 1 @@ -2283,7 +2395,8 @@ def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>; def ROL16ri : Ii8<0xC1, MRM0r, (outs 
GR16:$dst), (ins GR16:$src1, i8imm:$src2), "rol{w}\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>, OpSize; + [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>, + OpSize; def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2), "rol{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>; @@ -2352,7 +2465,8 @@ def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>; def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2), "ror{w}\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>, OpSize; + [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>, + OpSize; def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2), "ror{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>; @@ -2408,17 +2522,21 @@ let isTwoAddress = 0 in { // Double shift instructions (generalizations of rotate) let Uses = [CL] in { -def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), +def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}", [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, TB; -def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), +def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}", [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, TB; -def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), +def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>, TB, OpSize; -def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), +def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>, TB, OpSize; @@ -2426,25 +2544,29 @@ def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$sr let isCommutable = 1 in { // These instructions commute to each other. 
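Editorial aside, not part of the patch: "commute to each other" rests on the identity shld(a, b, n) == shrd(b, a, 32 - n) for 0 < n < 32, which is what lets the two-address pass trade one immediate form for the other. A minimal C++ sketch of the two operations:

    #include <cstdint>
    // SHLD dst, src, n: shift dst left by n, filling the vacated low bits
    // from the top of src. Valid for 0 < n < 32.
    uint32_t shld(uint32_t a, uint32_t b, unsigned n) {
      return (a << n) | (b >> (32 - n));
    }
    // SHRD dst, src, n: shift dst right by n, filling the vacated high bits
    // from the bottom of src, so shld(a, b, n) == shrd(b, a, 32 - n).
    uint32_t shrd(uint32_t a, uint32_t b, unsigned n) {
      return (a >> n) | (b << (32 - n));
    }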
def SHLD32rri8 : Ii8<0xA4, MRMDestReg, - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$src3), + (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2, i8imm:$src3), "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, (i8 imm:$src3)))]>, TB; def SHRD32rri8 : Ii8<0xAC, MRMDestReg, - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$src3), + (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2, i8imm:$src3), "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, (i8 imm:$src3)))]>, TB; def SHLD16rri8 : Ii8<0xA4, MRMDestReg, - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$src3), + (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2, i8imm:$src3), "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, (i8 imm:$src3)))]>, TB, OpSize; def SHRD16rri8 : Ii8<0xAC, MRMDestReg, - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$src3), + (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2, i8imm:$src3), "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, (i8 imm:$src3)))]>, @@ -2642,6 +2764,16 @@ def ADC32rr : I<0x11, MRMDestReg, (outs GR32:$dst), "adc{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (adde GR32:$src1, GR32:$src2))]>; } + +def ADC8rr_REV : I<0x12, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "adc{b}\t{$src2, $dst|$dst, $src2}", []>; +def ADC16rr_REV : I<0x13, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "adc{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def ADC32rr_REV : I<0x13, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "adc{l}\t{$src2, $dst|$dst, $src2}", []>; + def ADC8rm : I<0x12, MRMSrcMem , (outs GR8:$dst), (ins GR8:$src1, i8mem:$src2), "adc{b}\t{$src2, $dst|$dst, $src2}", @@ -2728,6 +2860,15 @@ def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2), [(set GR32:$dst, (sub GR32:$src1, GR32:$src2)), (implicit EFLAGS)]>; +def SUB8rr_REV : I<0x2A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "sub{b}\t{$src2, $dst|$dst, $src2}", []>; +def SUB16rr_REV : I<0x2B, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def SUB32rr_REV : I<0x2B, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "sub{l}\t{$src2, $dst|$dst, $src2}", []>; + // Register-Memory Subtraction def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2), @@ -2869,6 +3010,16 @@ let isTwoAddress = 0 in { def SBB32i32 : Ii32<0x1D, RawFrm, (outs), (ins i32imm:$src), "sbb{l}\t{$src, %eax|%eax, $src}", []>; } + +def SBB8rr_REV : I<0x1A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "sbb{b}\t{$src2, $dst|$dst, $src2}", []>; +def SBB16rr_REV : I<0x1B, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "sbb{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def SBB32rr_REV : I<0x1B, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "sbb{l}\t{$src2, $dst|$dst, $src2}", []>; + def SBB8rm : I<0x1A, MRMSrcMem, (outs GR8:$dst), (ins GR8:$src1, i8mem:$src2), "sbb{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (sube GR8:$src1, (load addr:$src2)))]>; @@ -2923,7 +3074,8 @@ def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst), "imul{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2))), (implicit EFLAGS)]>, TB, OpSize; -def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, 
i32mem:$src2), +def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), + (ins GR32:$src1, i32mem:$src2), "imul{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2))), (implicit EFLAGS)]>, TB; @@ -2955,12 +3107,12 @@ def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8 (implicit EFLAGS)]>; // Memory-Integer Signed Integer Multiply -def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16 +def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16 (outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2), "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set GR16:$dst, (mul (load addr:$src1), imm:$src2)), (implicit EFLAGS)]>, OpSize; -def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32 +def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32 (outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2), "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set GR32:$dst, (mul (load addr:$src1), imm:$src2)), @@ -3068,11 +3220,11 @@ def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "sbb{w}\t$dst, $dst", - [(set GR16:$dst, (zext (X86setcc_c X86_COND_B, EFLAGS)))]>, + [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>, OpSize; def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "sbb{l}\t$dst, $dst", - [(set GR32:$dst, (zext (X86setcc_c X86_COND_B, EFLAGS)))]>; + [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; } // isCodeGenOnly def SETEr : I<0x94, MRM0r, @@ -3371,15 +3523,21 @@ def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), // Unlike with the register+register form, the memory+register form of the // bt instruction does not ignore the high bits of the index. From ISel's -// perspective, this is pretty bizarre. Disable these instructions for now. -//def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), -// "bt{w}\t{$src2, $src1|$src1, $src2}", +// perspective, this is pretty bizarre. Make these instructions disassembly +// only for now. 
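Editorial aside, not part of the patch: for bt reg, reg the CPU masks the bit index to the operand width, but for bt mem, reg it treats memory as a long bit string, so the sign-extended index can select a word well before or after the addressed operand. A C++ sketch of the 16-bit memory form under that reading of the manuals:

    #include <cstdint>
    // BT m16, r16: the word actually tested is base + (index div 16), so the
    // high bits of the index matter -- unlike BT r16, r16, which masks them.
    bool bt16(const uint16_t *base, int16_t index) {
      const uint16_t *word = base + (index >> 4); // arithmetic shift: floor div
      return (*word >> (index & 15)) & 1;
    }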
+ +def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), + "bt{w}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi16 addr:$src1), GR16:$src2), -// (implicit EFLAGS)]>, OpSize, TB, Requires<[FastBTMem]>; -//def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), -// "bt{l}\t{$src2, $src1|$src1, $src2}", +// (implicit EFLAGS)] + [] + >, OpSize, TB, Requires<[FastBTMem]>; +def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), + "bt{l}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi32 addr:$src1), GR32:$src2), -// (implicit EFLAGS)]>, TB, Requires<[FastBTMem]>; +// (implicit EFLAGS)] + [] + >, TB, Requires<[FastBTMem]>; def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", @@ -3400,12 +3558,67 @@ def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2), "bt{l}\t{$src2, $src1|$src1, $src2}", [(X86bt (loadi32 addr:$src1), i32immSExt8:$src2), (implicit EFLAGS)]>, TB; + +def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), + "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), + "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), + "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), + "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2), + "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2), + "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2), + "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2), + "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB; + +def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), + "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), + "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), + "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), + "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2), + "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2), + "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2), + "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2), + "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB; + +def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), + "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), + "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), + "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), + "bts{l}\t{$src2, 
$src1|$src1, $src2}", []>, TB; +def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2), + "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2), + "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2), + "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2), + "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB; } // Defs = [EFLAGS] // Sign/Zero extenders // Use movsbl intead of movsbw; we don't care about the high 16 bits // of the register here. This has a smaller encoding and avoids a -// partial-register update. +// partial-register update. Actual movsbw included for the disassembler. +def MOVSX16rr8W : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src), + "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def MOVSX16rm8W : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src), + "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src), "", [(set GR16:$dst, (sext GR8:$src))]>, TB; def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src), @@ -3425,7 +3638,11 @@ def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src), // Use movzbl intead of movzbw; we don't care about the high 16 bits // of the register here. This has a smaller encoding and avoids a -// partial-register update. +// partial-register update. Actual movzbw included for the disassembler. +def MOVZX16rr8W : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src), + "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def MOVZX16rm8W : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src), + "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src), "", [(set GR16:$dst, (zext GR8:$src))]>, TB; def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src), @@ -3483,15 +3700,18 @@ let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1, def MOV8r0 : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "xor{b}\t$dst, $dst", [(set GR8:$dst, 0)]>; -// Use xorl instead of xorw since we don't care about the high 16 bits, -// it's smaller, and it avoids a partial-register update. -def MOV16r0 : I<0x31, MRMInitReg, (outs GR16:$dst), (ins), - "", [(set GR16:$dst, 0)]>; -def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), + +def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "xor{l}\t$dst, $dst", [(set GR32:$dst, 0)]>; } +// Use xorl instead of xorw since we don't care about the high 16 bits, +// it's smaller, and it avoids a partial-register update. +let AddedComplexity = 1 in +def : Pat<(i16 0), + (EXTRACT_SUBREG (MOV32r0), x86_subreg_16bit)>; + //===----------------------------------------------------------------------===// // Thread Local Storage Instructions // @@ -3538,18 +3758,32 @@ def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr), // Atomic swap. These are just normal xchg instructions. But since a memory // operand is referenced, the atomicity is ensured. 
let Constraints = "$val = $dst" in { -def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val), +def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst), + (ins GR32:$val, i32mem:$ptr), "xchg{l}\t{$val, $ptr|$ptr, $val}", [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>; -def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val), +def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst), + (ins GR16:$val, i16mem:$ptr), "xchg{w}\t{$val, $ptr|$ptr, $val}", [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>, OpSize; -def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val), +def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr), "xchg{b}\t{$val, $ptr|$ptr, $val}", [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))]>; + +def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src), + "xchg{l}\t{$val, $src|$src, $val}", []>; +def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src), + "xchg{w}\t{$val, $src|$src, $val}", []>, OpSize; +def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src), + "xchg{b}\t{$val, $src|$src, $val}", []>; } +def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src), + "xchg{w}\t{$src, %ax|%ax, $src}", []>, OpSize; +def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src), + "xchg{l}\t{$src, %eax|%eax, $src}", []>; + // Atomic compare and swap. let Defs = [EAX, EFLAGS], Uses = [EAX] in { def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap), @@ -3579,23 +3813,54 @@ def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap), // Atomic exchange and add let Constraints = "$val = $dst", Defs = [EFLAGS] in { -def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val), +def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr), "lock\n\t" "xadd{l}\t{$val, $ptr|$ptr, $val}", [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>, TB, LOCK; -def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val), +def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr), "lock\n\t" "xadd{w}\t{$val, $ptr|$ptr, $val}", [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>, TB, OpSize, LOCK; -def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val), +def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr), "lock\n\t" "xadd{b}\t{$val, $ptr|$ptr, $val}", [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>, TB, LOCK; } +def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src), + "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB; +def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), + "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), + "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src), + "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB; +def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), + "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), + "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src), + "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB; +def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), + "cmpxchg{w}\t{$src, $dst|$dst, 
$src}", []>, TB, OpSize; +def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), + "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src), + "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB; +def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), + "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), + "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst), + "cmpxchg8b\t$dst", []>, TB; + // Optimized codegen when the non-memory output is not used. // FIXME: Use normal add / sub instructions and add lock prefix dynamically. let Defs = [EFLAGS] in { @@ -3652,7 +3917,7 @@ def LOCK_SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2), def LOCK_SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2), "lock\n\t" "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK; -def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2), +def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2), "lock\n\t" "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK; def LOCK_SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2), @@ -3777,12 +4042,193 @@ def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src), "lar{l}\t{$src, $dst|$dst, $src}", []>, TB; def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "lar{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def LSL16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), + "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LSL16rr : I<0x03, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), + "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LSL32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), + "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB; +def LSL32rr : I<0x03, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), + "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def INVLPG : I<0x01, RawFrm, (outs), (ins), "invlpg", []>, TB; + +def STRr : I<0x00, MRM1r, (outs GR16:$dst), (ins), + "str{w}\t{$dst}", []>, TB; +def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins), + "str{w}\t{$dst}", []>, TB; +def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src), + "ltr{w}\t{$src}", []>, TB; +def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src), + "ltr{w}\t{$src}", []>, TB; + +def PUSHFS16 : I<0xa0, RawFrm, (outs), (ins), + "push{w}\t%fs", []>, OpSize, TB; +def PUSHFS32 : I<0xa0, RawFrm, (outs), (ins), + "push{l}\t%fs", []>, TB; +def PUSHGS16 : I<0xa8, RawFrm, (outs), (ins), + "push{w}\t%gs", []>, OpSize, TB; +def PUSHGS32 : I<0xa8, RawFrm, (outs), (ins), + "push{l}\t%gs", []>, TB; + +def POPFS16 : I<0xa1, RawFrm, (outs), (ins), + "pop{w}\t%fs", []>, OpSize, TB; +def POPFS32 : I<0xa1, RawFrm, (outs), (ins), + "pop{l}\t%fs", []>, TB; +def POPGS16 : I<0xa9, RawFrm, (outs), (ins), + "pop{w}\t%gs", []>, OpSize, TB; +def POPGS32 : I<0xa9, RawFrm, (outs), (ins), + "pop{l}\t%gs", []>, TB; + +def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "lds{w}\t{$src, $dst|$dst, $src}", []>, OpSize; +def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + "lds{l}\t{$src, $dst|$dst, $src}", []>; +def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "lss{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LSS32rm : I<0xb2, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + 
"lss{l}\t{$src, $dst|$dst, $src}", []>, TB; +def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "les{w}\t{$src, $dst|$dst, $src}", []>, OpSize; +def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + "les{l}\t{$src, $dst|$dst, $src}", []>; +def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "lfs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LFS32rm : I<0xb4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + "lfs{l}\t{$src, $dst|$dst, $src}", []>, TB; +def LGS16rm : I<0xb5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "lgs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + "lgs{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def VERRr : I<0x00, MRM4r, (outs), (ins GR16:$seg), + "verr\t$seg", []>, TB; +def VERRm : I<0x00, MRM4m, (outs), (ins i16mem:$seg), + "verr\t$seg", []>, TB; +def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg), + "verw\t$seg", []>, TB; +def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg), + "verw\t$seg", []>, TB; + +// Descriptor-table support instructions + +def SGDTm : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins), + "sgdt\t$dst", []>, TB; +def SIDTm : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins), + "sidt\t$dst", []>, TB; +def SLDT16r : I<0x00, MRM0r, (outs GR16:$dst), (ins), + "sldt{w}\t$dst", []>, TB; +def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins), + "sldt{w}\t$dst", []>, TB; +def LGDTm : I<0x01, MRM2m, (outs), (ins opaque48mem:$src), + "lgdt\t$src", []>, TB; +def LIDTm : I<0x01, MRM3m, (outs), (ins opaque48mem:$src), + "lidt\t$src", []>, TB; +def LLDT16r : I<0x00, MRM2r, (outs), (ins GR16:$src), + "lldt{w}\t$src", []>, TB; +def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src), + "lldt{w}\t$src", []>, TB; // String manipulation instructions def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", []>; def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", []>, OpSize; -def LODSD : I<0xAD, RawFrm, (outs), (ins), "lodsd", []>; +def LODSD : I<0xAD, RawFrm, (outs), (ins), "lods{l|d}", []>; + +def OUTSB : I<0x6E, RawFrm, (outs), (ins), "outsb", []>; +def OUTSW : I<0x6F, RawFrm, (outs), (ins), "outsw", []>, OpSize; +def OUTSD : I<0x6F, RawFrm, (outs), (ins), "outs{l|d}", []>; + +// CPU flow control instructions + +def HLT : I<0xF4, RawFrm, (outs), (ins), "hlt", []>; +def RSM : I<0xAA, RawFrm, (outs), (ins), "rsm", []>, TB; + +// FPU control instructions + +def FNINIT : I<0xE3, RawFrm, (outs), (ins), "fninit", []>, DB; + +// Flag instructions + +def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>; +def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>; +def CLI : I<0xFA, RawFrm, (outs), (ins), "cli", []>; +def STI : I<0xFB, RawFrm, (outs), (ins), "sti", []>; +def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>; +def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>; +def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>; + +def CLTS : I<0x06, RawFrm, (outs), (ins), "clts", []>, TB; + +// Table lookup instructions + +def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>; + +// Specialized register support + +def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", []>, TB; +def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", []>, TB; +def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", []>, TB; + +def SMSW16r : I<0x01, MRM4r, (outs GR16:$dst), (ins), + "smsw{w}\t$dst", []>, OpSize, TB; +def SMSW32r : I<0x01, MRM4r, (outs GR32:$dst), (ins), + "smsw{l}\t$dst", []>, TB; +// For memory operands, there is only a 16-bit form 
+def SMSW16m : I<0x01, MRM4m, (outs i16mem:$dst), (ins), + "smsw{w}\t$dst", []>, TB; + +def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src), + "lmsw{w}\t$src", []>, TB; +def LMSW16m : I<0x01, MRM6m, (outs), (ins i16mem:$src), + "lmsw{w}\t$src", []>, TB; + +def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", []>, TB; + +// Cache instructions + +def INVD : I<0x08, RawFrm, (outs), (ins), "invd", []>, TB; +def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", []>, TB; + +// VMX instructions + +// 66 0F 38 80 +def INVEPT : I<0x38, RawFrm, (outs), (ins), "invept", []>, OpSize, TB; +// 66 0F 38 81 +def INVVPID : I<0x38, RawFrm, (outs), (ins), "invvpid", []>, OpSize, TB; +// 0F 01 C1 +def VMCALL : I<0x01, RawFrm, (outs), (ins), "vmcall", []>, TB; +def VMCLEARm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs), + "vmclear\t$vmcs", []>, OpSize, TB; +// 0F 01 C2 +def VMLAUNCH : I<0x01, RawFrm, (outs), (ins), "vmlaunch", []>, TB; +// 0F 01 C3 +def VMRESUME : I<0x01, RawFrm, (outs), (ins), "vmresume", []>, TB; +def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs), + "vmptrld\t$vmcs", []>, TB; +def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins), + "vmptrst\t$vmcs", []>, TB; +def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src), + "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB; +def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), + "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB; +def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src), + "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB; +def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), + "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB; +def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), + "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB; +def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), + "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB; +def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), + "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB; +def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), + "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB; +// 0F 01 C4 +def VMXOFF : I<0x01, RawFrm, (outs), (ins), "vmxoff", []>, OpSize; +def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon), + "vmxon\t{$vmxon}", []>, XD; //===----------------------------------------------------------------------===// // Non-Instruction Patterns @@ -4028,15 +4474,18 @@ def : Pat<(srl_su GR16:$src, (i8 8)), x86_subreg_16bit)>, Requires<[In32BitMode]>; def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))), - (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), + (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, + GR16_ABCD)), x86_subreg_8bit_hi))>, Requires<[In32BitMode]>; def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))), - (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), + (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, + GR16_ABCD)), x86_subreg_8bit_hi))>, Requires<[In32BitMode]>; def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)), - (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)), + (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, + GR32_ABCD)), x86_subreg_8bit_hi))>, Requires<[In32BitMode]>; @@ -4185,10 +4634,10 @@ def : Pat<(store (shld (loadi16 addr:$dst), (i8 imm:$amt1), GR16:$src2, (i8 imm:$amt2)), addr:$dst), (SHLD16mri8 addr:$dst, GR16:$src2, (i8 imm:$amt1))>; -// (anyext (setcc_carry)) -> (zext 
(setcc_carry)) -def : Pat<(i16 (anyext (X86setcc_c X86_COND_B, EFLAGS))), +// (anyext (setcc_carry)) -> (setcc_carry) +def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), (SETB_C16r)>; -def : Pat<(i32 (anyext (X86setcc_c X86_COND_B, EFLAGS))), +def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), (SETB_C32r)>; //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index 500785b..fc40c9a 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -72,13 +72,13 @@ let Constraints = "$src1 = $dst" in { multiclass MMXI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, ValueType OpVT, bit Commutable = 0> { def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst), - (ins VR64:$src1, VR64:$src2), + (ins VR64:$src1, VR64:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (OpVT (OpNode VR64:$src1, VR64:$src2)))]> { let isCommutable = Commutable; } def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst), - (ins VR64:$src1, i64mem:$src2), + (ins VR64:$src1, i64mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (OpVT (OpNode VR64:$src1, (bitconvert @@ -88,13 +88,13 @@ let Constraints = "$src1 = $dst" in { multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId, bit Commutable = 0> { def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst), - (ins VR64:$src1, VR64:$src2), + (ins VR64:$src1, VR64:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]> { let isCommutable = Commutable; } def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst), - (ins VR64:$src1, i64mem:$src2), + (ins VR64:$src1, i64mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (IntId VR64:$src1, (bitconvert (load_mmx addr:$src2))))]>; @@ -144,9 +144,9 @@ let Constraints = "$src1 = $dst" in { //===----------------------------------------------------------------------===// def MMX_EMMS : MMXI<0x77, RawFrm, (outs), (ins), "emms", - [(int_x86_mmx_emms)]>; + [(int_x86_mmx_emms)]>; def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms", - [(int_x86_mmx_femms)]>; + [(int_x86_mmx_femms)]>; //===----------------------------------------------------------------------===// // MMX Scalar Instructions @@ -155,16 +155,21 @@ def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms", // Data Transfer Instructions def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", - [(set VR64:$dst, - (v2i32 (scalar_to_vector GR32:$src)))]>; + [(set VR64:$dst, + (v2i32 (scalar_to_vector GR32:$src)))]>; let canFoldAsLoad = 1, isReMaterializable = 1 in def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, - (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>; + (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>; let mayStore = 1 in def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src), "movd\t{$src, $dst|$dst, $src}", []>; +def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs), (ins GR32:$dst, VR64:$src), + "movd\t{$src, $dst|$dst, $src}", []>; +def MMX_MOVQ64gmr : MMXRI<0x7E, MRMDestMem, (outs), + (ins i64mem:$dst, VR64:$src), + "movq\t{$src, $dst|$dst, $src}", []>; let neverHasSideEffects = 1 in def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src), @@ -181,7 +186,7 @@ def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg, 
def MMX_MOVD64rrv164 : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, - (v1i64 (scalar_to_vector GR64:$src)))]>; + (v1i64 (scalar_to_vector GR64:$src)))]>; let neverHasSideEffects = 1 in def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src), @@ -223,7 +228,7 @@ def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), (v2i32 (X86vzmovl (v2i32 (scalar_to_vector GR32:$src)))))]>; let AddedComplexity = 20 in def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), - (ins i32mem:$src), + (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, (v2i32 (X86vzmovl (v2i32 @@ -432,21 +437,21 @@ def MMX_CVTPD2PIrr : MMX2I<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), "cvtpd2pi\t{$src, $dst|$dst, $src}", []>; let mayLoad = 1 in def MMX_CVTPD2PIrm : MMX2I<0x2D, MRMSrcMem, (outs VR64:$dst), - (ins f128mem:$src), + (ins f128mem:$src), "cvtpd2pi\t{$src, $dst|$dst, $src}", []>; def MMX_CVTPI2PDrr : MMX2I<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src), "cvtpi2pd\t{$src, $dst|$dst, $src}", []>; let mayLoad = 1 in def MMX_CVTPI2PDrm : MMX2I<0x2A, MRMSrcMem, (outs VR128:$dst), - (ins i64mem:$src), + (ins i64mem:$src), "cvtpi2pd\t{$src, $dst|$dst, $src}", []>; def MMX_CVTPI2PSrr : MMXI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src), "cvtpi2ps\t{$src, $dst|$dst, $src}", []>; let mayLoad = 1 in def MMX_CVTPI2PSrm : MMXI<0x2A, MRMSrcMem, (outs VR128:$dst), - (ins i64mem:$src), + (ins i64mem:$src), "cvtpi2ps\t{$src, $dst|$dst, $src}", []>; def MMX_CVTPS2PIrr : MMXI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), @@ -459,7 +464,7 @@ def MMX_CVTTPD2PIrr : MMX2I<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), "cvttpd2pi\t{$src, $dst|$dst, $src}", []>; let mayLoad = 1 in def MMX_CVTTPD2PIrm : MMX2I<0x2C, MRMSrcMem, (outs VR64:$dst), - (ins f128mem:$src), + (ins f128mem:$src), "cvttpd2pi\t{$src, $dst|$dst, $src}", []>; def MMX_CVTTPS2PIrr : MMXI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), @@ -481,14 +486,14 @@ def MMX_PEXTRWri : MMXIi8<0xC5, MRMSrcReg, (iPTR imm:$src2)))]>; let Constraints = "$src1 = $dst" in { def MMX_PINSRWrri : MMXIi8<0xC4, MRMSrcReg, - (outs VR64:$dst), (ins VR64:$src1, GR32:$src2, - i16i8imm:$src3), + (outs VR64:$dst), + (ins VR64:$src1, GR32:$src2,i16i8imm:$src3), "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR64:$dst, (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1), GR32:$src2,(iPTR imm:$src3))))]>; def MMX_PINSRWrmi : MMXIi8<0xC4, MRMSrcMem, - (outs VR64:$dst), (ins VR64:$src1, i16mem:$src2, - i16i8imm:$src3), + (outs VR64:$dst), + (ins VR64:$src1, i16mem:$src2, i16i8imm:$src3), "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR64:$dst, (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1), diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 62841f8..b26e508 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -70,7 +70,7 @@ def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>; def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>; def SDTX86CmpPTest : SDTypeProfile<0, 2, [SDTCisVT<0, v4f32>, - SDTCisVT<1, v4f32>]>; + SDTCisVT<1, v4f32>]>; def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>; //===----------------------------------------------------------------------===// @@ -116,12 +116,18 @@ def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{ return cast<LoadSDNode>(N)->getAlignment() >= 16; }]>; -def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload 
node:$ptr))>; -def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>; -def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>; -def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>; -def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>; -def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>; +def alignedloadfsf32 : PatFrag<(ops node:$ptr), + (f32 (alignedload node:$ptr))>; +def alignedloadfsf64 : PatFrag<(ops node:$ptr), + (f64 (alignedload node:$ptr))>; +def alignedloadv4f32 : PatFrag<(ops node:$ptr), + (v4f32 (alignedload node:$ptr))>; +def alignedloadv2f64 : PatFrag<(ops node:$ptr), + (v2f64 (alignedload node:$ptr))>; +def alignedloadv4i32 : PatFrag<(ops node:$ptr), + (v4i32 (alignedload node:$ptr))>; +def alignedloadv2i64 : PatFrag<(ops node:$ptr), + (v2i64 (alignedload node:$ptr))>; // Like 'load', but uses special alignment checks suitable for use in // memory operands in most SSE instructions, which are required to @@ -363,6 +369,11 @@ def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>; // Match intrinsics which expect XMM operand(s). +def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src), + "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>; +def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src), + "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>; + def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), "cvtss2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>; @@ -441,19 +452,26 @@ def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2), "ucomiss\t{$src2, $src1|$src1, $src2}", [(X86cmp FR32:$src1, (loadf32 addr:$src2)), (implicit EFLAGS)]>; + +def COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), + "comiss\t{$src2, $src1|$src1, $src2}", []>; +def COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), + "comiss\t{$src2, $src1|$src1, $src2}", []>; + } // Defs = [EFLAGS] // Aliases to match intrinsics which expect XMM operand(s). let Constraints = "$src1 = $dst" in { def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, VR128:$src, - SSECC:$cc), + (outs VR128:$dst), + (ins VR128:$src1, VR128:$src, SSECC:$cc), "cmp${cc}ss\t{$src, $dst|$dst, $src}", - [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1, - VR128:$src, imm:$cc))]>; + [(set VR128:$dst, (int_x86_sse_cmp_ss + VR128:$src1, + VR128:$src, imm:$cc))]>; def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem, - (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, - SSECC:$cc), + (outs VR128:$dst), + (ins VR128:$src1, f32mem:$src, SSECC:$cc), "cmp${cc}ss\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1, (load addr:$src), imm:$cc))]>; @@ -806,9 +824,10 @@ multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr, } // Scalar operation, mem. - def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src), + def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src), !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"), - [(set FR32:$dst, (OpNode (load addr:$src)))]>; + [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS, + Requires<[HasSSE1, OptForSize]>; // Vector operation, reg. 
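The COMISSrr/COMISSrm definitions above carry empty pattern lists: they exist so the instruction tables (and hence the new disassembler) cover the plain comiss encodings, while instruction selection keeps using the intrinsic-based forms. As a reminder of what comiss reports, here is a scalar model of its EFLAGS result (illustrative only):

// comiss sets ZF/PF/CF from the comparison: unordered (either input
// NaN) sets all three, less-than sets only CF, equal sets only ZF,
// and greater-than clears all three.
struct ComiFlags { bool zf, pf, cf; };

ComiFlags comiss(float a, float b) {
  ComiFlags f = {false, false, false};
  if (a != a || b != b) {       // NaN compares unequal to itself
    f.zf = f.pf = f.cf = true;  // unordered
  } else {
    f.zf = (a == b);
    f.cf = (a < b);
  }
  return f;
}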
def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), @@ -1098,9 +1117,10 @@ def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src), def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src), "cvtsd2ss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (fround FR64:$src))]>; -def CVTSD2SSrm : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src), +def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src), "cvtsd2ss\t{$src, $dst|$dst, $src}", - [(set FR32:$dst, (fround (loadf64 addr:$src)))]>; + [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD, + Requires<[HasSSE2, OptForSize]>; def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src), "cvtsi2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (sint_to_fp GR32:$src))]>; @@ -1137,7 +1157,10 @@ def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src), def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src), "cvtss2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (extloadf32 addr:$src))]>, XS, - Requires<[HasSSE2]>; + Requires<[HasSSE2, OptForSize]>; + +def : Pat<(extloadf32 addr:$src), + (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[HasSSE2, OptForSpeed]>; // Match intrinsics which expect XMM operand(s). def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), @@ -1205,14 +1228,14 @@ def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2), // Aliases to match intrinsics which expect XMM operand(s). let Constraints = "$src1 = $dst" in { def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, VR128:$src, - SSECC:$cc), + (outs VR128:$dst), + (ins VR128:$src1, VR128:$src, SSECC:$cc), "cmp${cc}sd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1, VR128:$src, imm:$cc))]>; def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem, - (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, - SSECC:$cc), + (outs VR128:$dst), + (ins VR128:$src1, f64mem:$src, SSECC:$cc), "cmp${cc}sd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1, (load addr:$src), imm:$cc))]>; @@ -1542,9 +1565,15 @@ def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), [(set VR128:$dst, (int_x86_sse2_cvtps2dq (memop addr:$src)))]>; // SSE2 packed instructions with XS prefix +def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "cvttps2dq\t{$src, $dst|$dst, $src}", []>; +def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), + "cvttps2dq\t{$src, $dst|$dst, $src}", []>; + def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", - [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>, + [(set VR128:$dst, + (int_x86_sse2_cvttps2dq VR128:$src))]>, XS, Requires<[HasSSE2]>; def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", @@ -1572,6 +1601,11 @@ def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src), (memop addr:$src)))]>; // SSE2 instructions without OpSize prefix +def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB; +def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), + "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB; + def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>, 
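The hunk above splits extloadf32 lowering by optimization goal: when optimizing for size the load folds into cvtss2sd (one instruction), while the new OptForSpeed pattern loads with movss first and converts register-to-register, presumably to avoid the memory form's merge into the old destination value. In scalar terms the operation being implemented is just:

#include <cstring>

// extloadf32: load a 32-bit float and widen it to f64. The
// movss+cvtss2sd split and the folded cvtss2sd-from-memory form both
// compute exactly this.
double extendLoadF32(const void *p) {
  float f;
  std::memcpy(&f, p, sizeof(float)); // the 32-bit load (movss)
  return (double)f;                  // the widening (cvtss2sd)
}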
@@ -1582,6 +1616,12 @@ def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), (load addr:$src)))]>, TB, Requires<[HasSSE2]>; +def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "cvtpd2ps\t{$src, $dst|$dst, $src}", []>; +def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), + "cvtpd2ps\t{$src, $dst|$dst, $src}", []>; + + def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtpd2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>; @@ -1856,31 +1896,34 @@ let Constraints = "$src1 = $dst" in { multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId, bit Commutable = 0> { - def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), + def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> { let isCommutable = Commutable; } - def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), + def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId VR128:$src1, - (bitconvert (memopv2i64 addr:$src2))))]>; + (bitconvert (memopv2i64 + addr:$src2))))]>; } multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm, string OpcodeStr, Intrinsic IntId, Intrinsic IntId2> { - def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, - VR128:$src2), + def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>; - def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, - i128mem:$src2), + def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))]>; - def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, - i32i8imm:$src2), + def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), + (ins VR128:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>; } @@ -1888,14 +1931,14 @@ multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm, /// PDI_binop_rm - Simple SSE2 binary operator. 
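PDI_binop_rm, defined next, stamps out the usual rr/rm pair: the same packed operation with the right-hand side either already in a register or bitconverted from a v2i64 memory load. A scalar picture of one such binop (a paddw-style lane-wise add, shown purely for illustration):

#include <cstdint>

// Lane-wise 16-bit add over a 128-bit value; lanes wrap modulo 2^16.
// The rr form has `rhs` in a register, the rm form loads it first.
void paddw(uint16_t dst[8], const uint16_t rhs[8]) {
  for (int i = 0; i < 8; ++i)
    dst[i] = (uint16_t)(dst[i] + rhs[i]);
}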
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, ValueType OpVT, bit Commutable = 0> { - def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, - VR128:$src2), + def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> { let isCommutable = Commutable; } - def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, - i128mem:$src2), + def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (OpVT (OpNode VR128:$src1, (bitconvert (memopv2i64 addr:$src2)))))]>; @@ -1909,16 +1952,16 @@ multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode, bit Commutable = 0> { def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2), + (ins VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> { let isCommutable = Commutable; } def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src2), + (ins VR128:$src1, i128mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (OpNode VR128:$src1, - (memopv2i64 addr:$src2)))]>; + (memopv2i64 addr:$src2)))]>; } } // Constraints = "$src1 = $dst" @@ -2455,6 +2498,13 @@ def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))), (MOVZPQILo2PQIrm addr:$src)>; } +// Instructions for the disassembler +// xr = XMM register +// xm = mem64 + +def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "movq\t{$src, $dst|$dst, $src}", []>, XS; + //===---------------------------------------------------------------------===// // SSE3 Instructions //===---------------------------------------------------------------------===// @@ -3175,13 +3225,14 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, OpSize; // Vector intrinsic operation, mem - def PSm_Int : SS4AIi8<opcps, MRMSrcMem, + def PSm_Int : Ii8<opcps, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>, - OpSize; + TA, OpSize, + Requires<[HasSSE41]>; // Vector intrinsic operation, reg def PDr_Int : SS4AIi8<opcpd, MRMSrcReg, @@ -3661,7 +3712,7 @@ let Constraints = "$src1 = $dst" in { "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), [(set VR128:$dst, (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>, - OpSize; + OpSize; def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3), !strconcat(OpcodeStr, @@ -3786,76 +3837,63 @@ let Constraints = "$src1 = $dst" in { // String/text processing instructions. 
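The SSE4.2 string-compare definitions that follow pair the real instructions (results implicitly in XMM0 or ECX, flags in EFLAGS) with pseudos expanded by a custom inserter so the intrinsics can yield a virtual register. As a heavily reduced model of one pcmpistri mode ("equal any" over unsigned bytes, ignoring implicit-length termination and the other immediate-selected modes):

#include <cstdint>

// Roughly what ECX receives in "equal any" mode: the index of the
// first byte of the second operand that occurs anywhere in the first
// operand, or 16 if there is no match.
int pcmpistriEqualAny(const uint8_t a[16], const uint8_t b[16]) {
  for (int j = 0; j < 16; ++j)
    for (int i = 0; i < 16; ++i)
      if (b[j] == a[i])
        return j;
  return 16;
}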
let Defs = [EFLAGS], usesCustomInserter = 1 in { def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "#PCMPISTRM128rr PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2, - imm:$src3))]>, OpSize; + (ins VR128:$src1, VR128:$src2, i8imm:$src3), + "#PCMPISTRM128rr PSEUDO!", + [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2, + imm:$src3))]>, OpSize; def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "#PCMPISTRM128rm PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpistrm128 VR128:$src1, - (load addr:$src2), - imm:$src3))]>, OpSize; + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + "#PCMPISTRM128rm PSEUDO!", + [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, (load addr:$src2), + imm:$src3))]>, OpSize; } let Defs = [XMM0, EFLAGS] in { def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", - []>, OpSize; + (ins VR128:$src1, VR128:$src2, i8imm:$src3), + "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize; def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", - []>, OpSize; + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize; } -let Defs = [EFLAGS], Uses = [EAX, EDX], - usesCustomInserter = 1 in { +let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in { def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "#PCMPESTRM128rr PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX, - VR128:$src3, - EDX, imm:$src5))]>, OpSize; + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + "#PCMPESTRM128rr PSEUDO!", + [(set VR128:$dst, + (int_x86_sse42_pcmpestrm128 + VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize; + def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "#PCMPESTRM128rm PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX, - (load addr:$src3), - EDX, imm:$src5))]>, OpSize; + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + "#PCMPESTRM128rm PSEUDO!", + [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 + VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>, + OpSize; } let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in { def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", - []>, OpSize; + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize; def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", - []>, OpSize; + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize; } let Defs = [ECX, EFLAGS] in { multiclass SS42AI_pcmpistri<Intrinsic IntId128> { - def rr : SS42AI<0x63, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}", - [(set ECX, - (IntId128 VR128:$src1, VR128:$src2, imm:$src3)), - (implicit EFLAGS)]>, - OpSize; + def rr : SS42AI<0x63, MRMSrcReg, (outs), + (ins 
VR128:$src1, VR128:$src2, i8imm:$src3), + "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}", + [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)), + (implicit EFLAGS)]>, OpSize; def rm : SS42AI<0x63, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}", - [(set ECX, - (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)), - (implicit EFLAGS)]>, - OpSize; + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}", + [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)), + (implicit EFLAGS)]>, OpSize; } } @@ -3870,20 +3908,16 @@ let Defs = [ECX, EFLAGS] in { let Uses = [EAX, EDX] in { multiclass SS42AI_pcmpestri<Intrinsic IntId128> { def rr : SS42AI<0x61, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}", - [(set ECX, - (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)), - (implicit EFLAGS)]>, - OpSize; + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}", + [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)), + (implicit EFLAGS)]>, OpSize; def rm : SS42AI<0x61, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}", - [(set ECX, - (IntId128 VR128:$src1, EAX, (load addr:$src3), - EDX, imm:$src5)), - (implicit EFLAGS)]>, - OpSize; + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}", + [(set ECX, + (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)), + (implicit EFLAGS)]>, OpSize; } } } diff --git a/lib/Target/X86/X86JITInfo.cpp b/lib/Target/X86/X86JITInfo.cpp index ce06f0f..c69cc83 100644 --- a/lib/Target/X86/X86JITInfo.cpp +++ b/lib/Target/X86/X86JITInfo.cpp @@ -426,16 +426,19 @@ X86JITInfo::X86JITInfo(X86TargetMachine &tm) : TM(tm) { void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr, JITCodeEmitter &JCE) { - MachineCodeEmitter::BufferState BS; #if defined (X86_64_JIT) - JCE.startGVStub(BS, GV, 8, 8); - JCE.emitWordLE((unsigned)(intptr_t)ptr); - JCE.emitWordLE((unsigned)(((intptr_t)ptr) >> 32)); + const unsigned Alignment = 8; + uint8_t Buffer[8]; + uint8_t *Cur = Buffer; + MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(intptr_t)ptr); + MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(((intptr_t)ptr) >> 32)); #else - JCE.startGVStub(BS, GV, 4, 4); - JCE.emitWordLE((intptr_t)ptr); + const unsigned Alignment = 4; + uint8_t Buffer[4]; + uint8_t *Cur = Buffer; + MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)ptr); #endif - return JCE.finishGVStub(BS); + return JCE.allocIndirectGV(GV, Buffer, sizeof(Buffer), Alignment); } TargetJITInfo::StubLayout X86JITInfo::getStubLayout() { @@ -451,7 +454,6 @@ TargetJITInfo::StubLayout X86JITInfo::getStubLayout() { void *X86JITInfo::emitFunctionStub(const Function* F, void *Target, JITCodeEmitter &JCE) { - MachineCodeEmitter::BufferState BS; // Note, we cast to intptr_t here to silence a -pedantic warning that // complains about casting a function pointer to a normal pointer. 
#if defined (X86_32_JIT) && !defined (_MSC_VER) diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td index 7bf074d..6db0cc3 100644 --- a/lib/Target/X86/X86RegisterInfo.td +++ b/lib/Target/X86/X86RegisterInfo.td @@ -195,6 +195,36 @@ let Namespace = "X86" in { def ES : Register<"es">; def FS : Register<"fs">; def GS : Register<"gs">; + + // Debug registers + def DR0 : Register<"dr0">; + def DR1 : Register<"dr1">; + def DR2 : Register<"dr2">; + def DR3 : Register<"dr3">; + def DR4 : Register<"dr4">; + def DR5 : Register<"dr5">; + def DR6 : Register<"dr6">; + def DR7 : Register<"dr7">; + + // Control registers + def ECR0 : Register<"ecr0">; + def ECR1 : Register<"ecr1">; + def ECR2 : Register<"ecr2">; + def ECR3 : Register<"ecr3">; + def ECR4 : Register<"ecr4">; + def ECR5 : Register<"ecr5">; + def ECR6 : Register<"ecr6">; + def ECR7 : Register<"ecr7">; + + def RCR0 : Register<"rcr0">; + def RCR1 : Register<"rcr1">; + def RCR2 : Register<"rcr2">; + def RCR3 : Register<"rcr3">; + def RCR4 : Register<"rcr4">; + def RCR5 : Register<"rcr5">; + def RCR6 : Register<"rcr6">; + def RCR7 : Register<"rcr7">; + def RCR8 : Register<"rcr8">; } @@ -446,6 +476,22 @@ def GR64 : RegisterClass<"X86", [i64], 64, def SEGMENT_REG : RegisterClass<"X86", [i16], 16, [CS, DS, SS, ES, FS, GS]> { } +// Debug registers. +def DEBUG_REG : RegisterClass<"X86", [i32], 32, + [DR0, DR1, DR2, DR3, DR4, DR5, DR6, DR7]> { +} + +// Control registers. +def CONTROL_REG_32 : RegisterClass<"X86", [i32], 32, + [ECR0, ECR1, ECR2, ECR3, ECR4, ECR5, ECR6, + ECR7]> { +} + +def CONTROL_REG_64 : RegisterClass<"X86", [i64], 64, + [RCR0, RCR1, RCR2, RCR3, RCR4, RCR5, RCR6, + RCR7, RCR8]> { +} + // GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD, GR32_ABCD, GR64_ABCD - Subclasses of // GR8, GR16, GR32, and GR64 which contain just the "a", "b", "c", and "d" // registers. On x86-32, GR16_ABCD and GR32_ABCD are classes for registers @@ -661,7 +707,8 @@ def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64, }]; let MethodBodies = [{ GR64_NOREX_NOSPClass::iterator - GR64_NOREX_NOSPClass::allocation_order_end(const MachineFunction &MF) const { + GR64_NOREX_NOSPClass::allocation_order_end(const MachineFunction &MF) const + { const TargetMachine &TM = MF.getTarget(); const TargetRegisterInfo *RI = TM.getRegisterInfo(); // Does the function dedicate RBP to being a frame ptr? diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h index fb457dd..ef6dbaf 100644 --- a/lib/Target/X86/X86Subtarget.h +++ b/lib/Target/X86/X86Subtarget.h @@ -77,7 +77,7 @@ protected: /// IsBTMemSlow - True if BT (bit test) of memory instructions are slow. bool IsBTMemSlow; - + /// DarwinVers - Nonzero if this is a darwin platform: the numeric /// version of the platform, e.g. 8 = 10.4 (Tiger), 9 = 10.5 (Leopard), etc. unsigned char DarwinVers; // Is any darwin-x86 platform.
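The control and debug registers just added exist mainly so the disassembler can decode moves to and from them. In those encodings the ModRM reg field names the CR/DR and r/m names the GPR, which is why an enumeration of CR0-CR8 and DR0-DR7 is needed. A small illustrative encoding helper (not an LLVM API; CR8 additionally requires a REX.R prefix in 64-bit mode, omitted here):

#include <cstdint>

// ModRM byte for "mov %crN, r32" (opcode 0F 20 /r): mod=11, reg=control
// register number, r/m=GPR number. "mov %drN, r32" (0F 21 /r) packs its
// fields the same way.
uint8_t modRMMovFromCR(unsigned crNum, unsigned gprNum) {
  return (uint8_t)(0xC0u | ((crNum & 7u) << 3) | (gprNum & 7u));
}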
@@ -169,8 +169,11 @@ public: p = "e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-n8:16:32:64"; else if (isTargetDarwin()) p = "e-p:32:32-f64:32:64-i64:32:64-f80:128:128-n8:16:32"; + else if (isTargetCygMing() || isTargetWindows()) + p = "e-p:32:32-f64:64:64-i64:64:64-f80:128:128-n8:16:32"; else p = "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32"; + return std::string(p); } diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index 0152121..962f0f7 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -91,10 +91,6 @@ X86TargetMachine::X86TargetMachine(const Target &T, const std::string &TT, assert(getRelocationModel() != Reloc::Default && "Relocation mode not picked"); - // If no code model is picked, default to small. - if (getCodeModel() == CodeModel::Default) - setCodeModel(CodeModel::Small); - // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC // is defined as a model for code which may be used in static or dynamic // executables but not necessarily a shared library. On X86-32 we just @@ -184,10 +180,6 @@ bool X86TargetMachine::addCodeEmitter(PassManagerBase &PM, Subtarget.setPICStyle(PICStyles::None); } - // 64-bit JIT places everything in the same buffer except external functions. - if (Subtarget.is64Bit()) - setCodeModel(CodeModel::Large); - PM.add(createX86CodeEmitterPass(*this, MCE)); return false; @@ -204,9 +196,6 @@ bool X86TargetMachine::addCodeEmitter(PassManagerBase &PM, Subtarget.setPICStyle(PICStyles::None); } - // 64-bit JIT places everything in the same buffer except external functions. - if (Subtarget.is64Bit()) - setCodeModel(CodeModel::Large); PM.add(createX86JITCodeEmitterPass(*this, JCE)); @@ -240,3 +229,23 @@ bool X86TargetMachine::addSimpleCodeEmitter(PassManagerBase &PM, PM.add(createX86ObjectCodeEmitterPass(*this, OCE)); return false; } + +void X86TargetMachine::setCodeModelForStatic() { + + if (getCodeModel() != CodeModel::Default) return; + + // For static codegen, if the code model is not already set, use the small model. + setCodeModel(CodeModel::Small); +} + + +void X86TargetMachine::setCodeModelForJIT() { + + if (getCodeModel() != CodeModel::Default) return; + + // 64-bit JIT places everything in the same buffer except external functions. + if (Subtarget.is64Bit()) + setCodeModel(CodeModel::Large); + else + setCodeModel(CodeModel::Small); +} diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h index b538408..6183e91 100644 --- a/lib/Target/X86/X86TargetMachine.h +++ b/lib/Target/X86/X86TargetMachine.h @@ -38,6 +38,11 @@ class X86TargetMachine : public LLVMTargetMachine { X86ELFWriterInfo ELFWriterInfo; Reloc::Model DefRelocModel; // Reloc model before it's overridden. +private: + // We have specific defaults for X86. + virtual void setCodeModelForJIT(); + virtual void setCodeModelForStatic(); + public: X86TargetMachine(const Target &T, const std::string &TT, const std::string &FS, bool is64Bit);
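The setCodeModelFor{Static,JIT} hooks replace the eager defaulting that was removed from the constructor and the emitter paths: the decision now happens once it is known whether we are JITting (where 64-bit needs the large model) or compiling statically (small). A minimal sketch of the shape of this hook pattern, with an invented base-class name standing in for LLVMTargetMachine:

// Hypothetical sketch only; the method names follow the patch, but the
// class and call sites are invented for illustration.
class TargetMachineSketch {
public:
  virtual ~TargetMachineSketch() {}
  void beginJIT() { setCodeModelForJIT(); /* then set up emitters */ }
  void beginStatic() { setCodeModelForStatic(); /* then codegen */ }
protected:
  // Targets override these to pick a default only while the code model
  // is still CodeModel::Default.
  virtual void setCodeModelForJIT() = 0;
  virtual void setCodeModelForStatic() = 0;
};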