diff options
Diffstat (limited to 'contrib/llvm/lib/Target/ARM/MCTargetDesc')
13 files changed, 3926 insertions, 57 deletions
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h new file mode 100644 index 0000000..9982fa6 --- /dev/null +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAddressingModes.h @@ -0,0 +1,667 @@ +//===- ARMAddressingModes.h - ARM Addressing Modes --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the ARM addressing mode implementation stuff. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_ARM_ARMADDRESSINGMODES_H +#define LLVM_TARGET_ARM_ARMADDRESSINGMODES_H + +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/APInt.h" +#include "llvm/Support/MathExtras.h" +#include <cassert> + +namespace llvm { + +/// ARM_AM - ARM Addressing Mode Stuff +namespace ARM_AM { + enum ShiftOpc { + no_shift = 0, + asr, + lsl, + lsr, + ror, + rrx + }; + + enum AddrOpc { + sub = 0, + add + }; + + static inline const char *getAddrOpcStr(AddrOpc Op) { + return Op == sub ? 
"-" : ""; + } + + static inline const char *getShiftOpcStr(ShiftOpc Op) { + switch (Op) { + default: assert(0 && "Unknown shift opc!"); + case ARM_AM::asr: return "asr"; + case ARM_AM::lsl: return "lsl"; + case ARM_AM::lsr: return "lsr"; + case ARM_AM::ror: return "ror"; + case ARM_AM::rrx: return "rrx"; + } + } + + static inline unsigned getShiftOpcEncoding(ShiftOpc Op) { + switch (Op) { + default: assert(0 && "Unknown shift opc!"); + case ARM_AM::asr: return 2; + case ARM_AM::lsl: return 0; + case ARM_AM::lsr: return 1; + case ARM_AM::ror: return 3; + } + } + + enum AMSubMode { + bad_am_submode = 0, + ia, + ib, + da, + db + }; + + static inline const char *getAMSubModeStr(AMSubMode Mode) { + switch (Mode) { + default: assert(0 && "Unknown addressing sub-mode!"); + case ARM_AM::ia: return "ia"; + case ARM_AM::ib: return "ib"; + case ARM_AM::da: return "da"; + case ARM_AM::db: return "db"; + } + } + + /// rotr32 - Rotate a 32-bit unsigned value right by a specified # bits. + /// + static inline unsigned rotr32(unsigned Val, unsigned Amt) { + assert(Amt < 32 && "Invalid rotate amount"); + return (Val >> Amt) | (Val << ((32-Amt)&31)); + } + + /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits. + /// + static inline unsigned rotl32(unsigned Val, unsigned Amt) { + assert(Amt < 32 && "Invalid rotate amount"); + return (Val << Amt) | (Val >> ((32-Amt)&31)); + } + + //===--------------------------------------------------------------------===// + // Addressing Mode #1: shift_operand with registers + //===--------------------------------------------------------------------===// + // + // This 'addressing mode' is used for arithmetic instructions. It can + // represent things like: + // reg + // reg [asr|lsl|lsr|ror|rrx] reg + // reg [asr|lsl|lsr|ror|rrx] imm + // + // This is stored three operands [rega, regb, opc]. The first is the base + // reg, the second is the shift amount (or reg0 if not present or imm). 
The + // third operand encodes the shift opcode and the imm if a reg isn't present. + // + static inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) { + return ShOp | (Imm << 3); + } + static inline unsigned getSORegOffset(unsigned Op) { + return Op >> 3; + } + static inline ShiftOpc getSORegShOp(unsigned Op) { + return (ShiftOpc)(Op & 7); + } + + /// getSOImmValImm - Given an encoded imm field for the reg/imm form, return + /// the 8-bit imm value. + static inline unsigned getSOImmValImm(unsigned Imm) { + return Imm & 0xFF; + } + /// getSOImmValRot - Given an encoded imm field for the reg/imm form, return + /// the rotate amount. + static inline unsigned getSOImmValRot(unsigned Imm) { + return (Imm >> 8) * 2; + } + + /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand, + /// computing the rotate amount to use. If this immediate value cannot be + /// handled with a single shifter-op, determine a good rotate amount that will + /// take a maximal chunk of bits out of the immediate. + static inline unsigned getSOImmValRotate(unsigned Imm) { + // 8-bit (or less) immediates are trivially shifter_operands with a rotate + // of zero. + if ((Imm & ~255U) == 0) return 0; + + // Use CTZ to compute the rotate amount. + unsigned TZ = CountTrailingZeros_32(Imm); + + // Rotate amount must be even. Something like 0x200 must be rotated 8 bits, + // not 9. + unsigned RotAmt = TZ & ~1; + + // If we can handle this spread, return it. + if ((rotr32(Imm, RotAmt) & ~255U) == 0) + return (32-RotAmt)&31; // HW rotates right, not left. + + // For values like 0xF000000F, we should ignore the low 6 bits, then + // retry the hunt. + if (Imm & 63U) { + unsigned TZ2 = CountTrailingZeros_32(Imm & ~63U); + unsigned RotAmt2 = TZ2 & ~1; + if ((rotr32(Imm, RotAmt2) & ~255U) == 0) + return (32-RotAmt2)&31; // HW rotates right, not left. + } + + // Otherwise, we have no way to cover this span of bits with a single + // shifter_op immediate. 
Return a chunk of bits that will be useful to + // handle. + return (32-RotAmt)&31; // HW rotates right, not left. + } + + /// getSOImmVal - Given a 32-bit immediate, if it is something that can fit + /// into an shifter_operand immediate operand, return the 12-bit encoding for + /// it. If not, return -1. + static inline int getSOImmVal(unsigned Arg) { + // 8-bit (or less) immediates are trivially shifter_operands with a rotate + // of zero. + if ((Arg & ~255U) == 0) return Arg; + + unsigned RotAmt = getSOImmValRotate(Arg); + + // If this cannot be handled with a single shifter_op, bail out. + if (rotr32(~255U, RotAmt) & Arg) + return -1; + + // Encode this correctly. + return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8); + } + + /// isSOImmTwoPartVal - Return true if the specified value can be obtained by + /// or'ing together two SOImmVal's. + static inline bool isSOImmTwoPartVal(unsigned V) { + // If this can be handled with a single shifter_op, bail out. + V = rotr32(~255U, getSOImmValRotate(V)) & V; + if (V == 0) + return false; + + // If this can be handled with two shifter_op's, accept. + V = rotr32(~255U, getSOImmValRotate(V)) & V; + return V == 0; + } + + /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, + /// return the first chunk of it. + static inline unsigned getSOImmTwoPartFirst(unsigned V) { + return rotr32(255U, getSOImmValRotate(V)) & V; + } + + /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, + /// return the second chunk of it. + static inline unsigned getSOImmTwoPartSecond(unsigned V) { + // Mask out the first hunk. + V = rotr32(~255U, getSOImmValRotate(V)) & V; + + // Take what's left. + assert(V == (rotr32(255U, getSOImmValRotate(V)) & V)); + return V; + } + + /// getThumbImmValShift - Try to handle Imm with a 8-bit immediate followed + /// by a left shift. Returns the shift amount to use. 
+ static inline unsigned getThumbImmValShift(unsigned Imm) { + // 8-bit (or less) immediates are trivially immediate operand with a shift + // of zero. + if ((Imm & ~255U) == 0) return 0; + + // Use CTZ to compute the shift amount. + return CountTrailingZeros_32(Imm); + } + + /// isThumbImmShiftedVal - Return true if the specified value can be obtained + /// by left shifting a 8-bit immediate. + static inline bool isThumbImmShiftedVal(unsigned V) { + // If this can be handled with + V = (~255U << getThumbImmValShift(V)) & V; + return V == 0; + } + + /// getThumbImm16ValShift - Try to handle Imm with a 16-bit immediate followed + /// by a left shift. Returns the shift amount to use. + static inline unsigned getThumbImm16ValShift(unsigned Imm) { + // 16-bit (or less) immediates are trivially immediate operand with a shift + // of zero. + if ((Imm & ~65535U) == 0) return 0; + + // Use CTZ to compute the shift amount. + return CountTrailingZeros_32(Imm); + } + + /// isThumbImm16ShiftedVal - Return true if the specified value can be + /// obtained by left shifting a 16-bit immediate. + static inline bool isThumbImm16ShiftedVal(unsigned V) { + // If this can be handled with + V = (~65535U << getThumbImm16ValShift(V)) & V; + return V == 0; + } + + /// getThumbImmNonShiftedVal - If V is a value that satisfies + /// isThumbImmShiftedVal, return the non-shiftd value. + static inline unsigned getThumbImmNonShiftedVal(unsigned V) { + return V >> getThumbImmValShift(V); + } + + + /// getT2SOImmValSplat - Return the 12-bit encoded representation + /// if the specified value can be obtained by splatting the low 8 bits + /// into every other byte or every byte of a 32-bit value. i.e., + /// 00000000 00000000 00000000 abcdefgh control = 0 + /// 00000000 abcdefgh 00000000 abcdefgh control = 1 + /// abcdefgh 00000000 abcdefgh 00000000 control = 2 + /// abcdefgh abcdefgh abcdefgh abcdefgh control = 3 + /// Return -1 if none of the above apply. + /// See ARM Reference Manual A6.3.2. 
+ static inline int getT2SOImmValSplatVal(unsigned V) { + unsigned u, Vs, Imm; + // control = 0 + if ((V & 0xffffff00) == 0) + return V; + + // If the value is zeroes in the first byte, just shift those off + Vs = ((V & 0xff) == 0) ? V >> 8 : V; + // Any passing value only has 8 bits of payload, splatted across the word + Imm = Vs & 0xff; + // Likewise, any passing values have the payload splatted into the 3rd byte + u = Imm | (Imm << 16); + + // control = 1 or 2 + if (Vs == u) + return (((Vs == V) ? 1 : 2) << 8) | Imm; + + // control = 3 + if (Vs == (u | (u << 8))) + return (3 << 8) | Imm; + + return -1; + } + + /// getT2SOImmValRotateVal - Return the 12-bit encoded representation if the + /// specified value is a rotated 8-bit value. Return -1 if no rotation + /// encoding is possible. + /// See ARM Reference Manual A6.3.2. + static inline int getT2SOImmValRotateVal(unsigned V) { + unsigned RotAmt = CountLeadingZeros_32(V); + if (RotAmt >= 24) + return -1; + + // If 'Arg' can be handled with a single shifter_op return the value. + if ((rotr32(0xff000000U, RotAmt) & V) == V) + return (rotr32(V, 24 - RotAmt) & 0x7f) | ((RotAmt + 8) << 7); + + return -1; + } + + /// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit + /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit + /// encoding for it. If not, return -1. + /// See ARM Reference Manual A6.3.2. + static inline int getT2SOImmVal(unsigned Arg) { + // If 'Arg' is an 8-bit splat, then get the encoded value. + int Splat = getT2SOImmValSplatVal(Arg); + if (Splat != -1) + return Splat; + + // If 'Arg' can be handled with a single shifter_op return the value. + int Rot = getT2SOImmValRotateVal(Arg); + if (Rot != -1) + return Rot; + + return -1; + } + + static inline unsigned getT2SOImmValRotate(unsigned V) { + if ((V & ~255U) == 0) return 0; + // Use CTZ to compute the rotate amount. 
+ unsigned RotAmt = CountTrailingZeros_32(V); + return (32 - RotAmt) & 31; + } + + static inline bool isT2SOImmTwoPartVal (unsigned Imm) { + unsigned V = Imm; + // Passing values can be any combination of splat values and shifter + // values. If this can be handled with a single shifter or splat, bail + // out. Those should be handled directly, not with a two-part val. + if (getT2SOImmValSplatVal(V) != -1) + return false; + V = rotr32 (~255U, getT2SOImmValRotate(V)) & V; + if (V == 0) + return false; + + // If this can be handled as an immediate, accept. + if (getT2SOImmVal(V) != -1) return true; + + // Likewise, try masking out a splat value first. + V = Imm; + if (getT2SOImmValSplatVal(V & 0xff00ff00U) != -1) + V &= ~0xff00ff00U; + else if (getT2SOImmValSplatVal(V & 0x00ff00ffU) != -1) + V &= ~0x00ff00ffU; + // If what's left can be handled as an immediate, accept. + if (getT2SOImmVal(V) != -1) return true; + + // Otherwise, do not accept. + return false; + } + + static inline unsigned getT2SOImmTwoPartFirst(unsigned Imm) { + assert (isT2SOImmTwoPartVal(Imm) && + "Immedate cannot be encoded as two part immediate!"); + // Try a shifter operand as one part + unsigned V = rotr32 (~255, getT2SOImmValRotate(Imm)) & Imm; + // If the rest is encodable as an immediate, then return it. + if (getT2SOImmVal(V) != -1) return V; + + // Try masking out a splat value first. + if (getT2SOImmValSplatVal(Imm & 0xff00ff00U) != -1) + return Imm & 0xff00ff00U; + + // The other splat is all that's left as an option. 
+ assert (getT2SOImmValSplatVal(Imm & 0x00ff00ffU) != -1); + return Imm & 0x00ff00ffU; + } + + static inline unsigned getT2SOImmTwoPartSecond(unsigned Imm) { + // Mask out the first hunk + Imm ^= getT2SOImmTwoPartFirst(Imm); + // Return what's left + assert (getT2SOImmVal(Imm) != -1 && + "Unable to encode second part of T2 two part SO immediate"); + return Imm; + } + + + //===--------------------------------------------------------------------===// + // Addressing Mode #2 + //===--------------------------------------------------------------------===// + // + // This is used for most simple load/store instructions. + // + // addrmode2 := reg +/- reg shop imm + // addrmode2 := reg +/- imm12 + // + // The first operand is always a Reg. The second operand is a reg if in + // reg/reg form, otherwise it's reg#0. The third field encodes the operation + // in bit 12, the immediate in bits 0-11, and the shift op in 13-15. The + // fourth operand 16-17 encodes the index mode. + // + // If this addressing mode is a frame index (before prolog/epilog insertion + // and code rewriting), this operand will have the form: FI#, reg0, <offs> + // with no shift amount for the frame offset. + // + static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, + unsigned IdxMode = 0) { + assert(Imm12 < (1 << 12) && "Imm too large!"); + bool isSub = Opc == sub; + return Imm12 | ((int)isSub << 12) | (SO << 13) | (IdxMode << 16) ; + } + static inline unsigned getAM2Offset(unsigned AM2Opc) { + return AM2Opc & ((1 << 12)-1); + } + static inline AddrOpc getAM2Op(unsigned AM2Opc) { + return ((AM2Opc >> 12) & 1) ? 
sub : add; + } + static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) { + return (ShiftOpc)((AM2Opc >> 13) & 7); + } + static inline unsigned getAM2IdxMode(unsigned AM2Opc) { + return (AM2Opc >> 16); + } + + + //===--------------------------------------------------------------------===// + // Addressing Mode #3 + //===--------------------------------------------------------------------===// + // + // This is used for sign-extending loads, and load/store-pair instructions. + // + // addrmode3 := reg +/- reg + // addrmode3 := reg +/- imm8 + // + // The first operand is always a Reg. The second operand is a reg if in + // reg/reg form, otherwise it's reg#0. The third field encodes the operation + // in bit 8, the immediate in bits 0-7. The fourth operand 9-10 encodes the + // index mode. + + /// getAM3Opc - This function encodes the addrmode3 opc field. + static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, + unsigned IdxMode = 0) { + bool isSub = Opc == sub; + return ((int)isSub << 8) | Offset | (IdxMode << 9); + } + static inline unsigned char getAM3Offset(unsigned AM3Opc) { + return AM3Opc & 0xFF; + } + static inline AddrOpc getAM3Op(unsigned AM3Opc) { + return ((AM3Opc >> 8) & 1) ? sub : add; + } + static inline unsigned getAM3IdxMode(unsigned AM3Opc) { + return (AM3Opc >> 9); + } + + //===--------------------------------------------------------------------===// + // Addressing Mode #4 + //===--------------------------------------------------------------------===// + // + // This is used for load / store multiple instructions. + // + // addrmode4 := reg, <mode> + // + // The four modes are: + // IA - Increment after + // IB - Increment before + // DA - Decrement after + // DB - Decrement before + // For VFP instructions, only the IA and DB modes are valid. 
+ + static inline AMSubMode getAM4SubMode(unsigned Mode) { + return (AMSubMode)(Mode & 0x7); + } + + static inline unsigned getAM4ModeImm(AMSubMode SubMode) { + return (int)SubMode; + } + + //===--------------------------------------------------------------------===// + // Addressing Mode #5 + //===--------------------------------------------------------------------===// + // + // This is used for coprocessor instructions, such as FP load/stores. + // + // addrmode5 := reg +/- imm8*4 + // + // The first operand is always a Reg. The second operand encodes the + // operation in bit 8 and the immediate in bits 0-7. + + /// getAM5Opc - This function encodes the addrmode5 opc field. + static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) { + bool isSub = Opc == sub; + return ((int)isSub << 8) | Offset; + } + static inline unsigned char getAM5Offset(unsigned AM5Opc) { + return AM5Opc & 0xFF; + } + static inline AddrOpc getAM5Op(unsigned AM5Opc) { + return ((AM5Opc >> 8) & 1) ? sub : add; + } + + //===--------------------------------------------------------------------===// + // Addressing Mode #6 + //===--------------------------------------------------------------------===// + // + // This is used for NEON load / store instructions. + // + // addrmode6 := reg with optional alignment + // + // This is stored in two operands [regaddr, align]. The first is the + // address register. The second operand is the value of the alignment + // specifier in bytes or zero if no explicit alignment. + // Valid alignments depend on the specific instruction. + + //===--------------------------------------------------------------------===// + // NEON Modified Immediates + //===--------------------------------------------------------------------===// + // + // Several NEON instructions (e.g., VMOV) take a "modified immediate" + // vector operand, where a small immediate encoded in the instruction + // specifies a full NEON vector value. 
These modified immediates are + // represented here as encoded integers. The low 8 bits hold the immediate + // value; bit 12 holds the "Op" field of the instruction, and bits 11-8 hold + // the "Cmode" field of the instruction. The interfaces below treat the + // Op and Cmode values as a single 5-bit value. + + static inline unsigned createNEONModImm(unsigned OpCmode, unsigned Val) { + return (OpCmode << 8) | Val; + } + static inline unsigned getNEONModImmOpCmode(unsigned ModImm) { + return (ModImm >> 8) & 0x1f; + } + static inline unsigned getNEONModImmVal(unsigned ModImm) { + return ModImm & 0xff; + } + + /// decodeNEONModImm - Decode a NEON modified immediate value into the + /// element value and the element size in bits. (If the element size is + /// smaller than the vector, it is splatted into all the elements.) + static inline uint64_t decodeNEONModImm(unsigned ModImm, unsigned &EltBits) { + unsigned OpCmode = getNEONModImmOpCmode(ModImm); + unsigned Imm8 = getNEONModImmVal(ModImm); + uint64_t Val = 0; + + if (OpCmode == 0xe) { + // 8-bit vector elements + Val = Imm8; + EltBits = 8; + } else if ((OpCmode & 0xc) == 0x8) { + // 16-bit vector elements + unsigned ByteNum = (OpCmode & 0x6) >> 1; + Val = Imm8 << (8 * ByteNum); + EltBits = 16; + } else if ((OpCmode & 0x8) == 0) { + // 32-bit vector elements, zero with one byte set + unsigned ByteNum = (OpCmode & 0x6) >> 1; + Val = Imm8 << (8 * ByteNum); + EltBits = 32; + } else if ((OpCmode & 0xe) == 0xc) { + // 32-bit vector elements, one byte with low bits set + unsigned ByteNum = 1 + (OpCmode & 0x1); + Val = (Imm8 << (8 * ByteNum)) | (0xffff >> (8 * (2 - ByteNum))); + EltBits = 32; + } else if (OpCmode == 0x1e) { + // 64-bit vector elements + for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) { + if ((ModImm >> ByteNum) & 1) + Val |= (uint64_t)0xff << (8 * ByteNum); + } + EltBits = 64; + } else { + assert(false && "Unsupported NEON immediate"); + } + return Val; + } + + AMSubMode getLoadStoreMultipleSubMode(int 
Opcode); + + //===--------------------------------------------------------------------===// + // Floating-point Immediates + // + static inline float getFPImmFloat(unsigned Imm) { + // We expect an 8-bit binary encoding of a floating-point number here. + union { + uint32_t I; + float F; + } FPUnion; + + uint8_t Sign = (Imm >> 7) & 0x1; + uint8_t Exp = (Imm >> 4) & 0x7; + uint8_t Mantissa = Imm & 0xf; + + // 8-bit FP iEEEE Float Encoding + // abcd efgh aBbbbbbc defgh000 00000000 00000000 + // + // where B = NOT(b); + + FPUnion.I = 0; + FPUnion.I |= Sign << 31; + FPUnion.I |= ((Exp & 0x4) != 0 ? 0 : 1) << 30; + FPUnion.I |= ((Exp & 0x4) != 0 ? 0x1f : 0) << 25; + FPUnion.I |= (Exp & 0x3) << 23; + FPUnion.I |= Mantissa << 19; + return FPUnion.F; + } + + /// getFP32Imm - Return an 8-bit floating-point version of the 32-bit + /// floating-point value. If the value cannot be represented as an 8-bit + /// floating-point value, then return -1. + static inline int getFP32Imm(const APInt &Imm) { + uint32_t Sign = Imm.lshr(31).getZExtValue() & 1; + int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127 + int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits + + // We can handle 4 bits of mantissa. + // mantissa = (16+UInt(e:f:g:h))/16. + if (Mantissa & 0x7ffff) + return -1; + Mantissa >>= 19; + if ((Mantissa & 0xf) != Mantissa) + return -1; + + // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 + if (Exp < -3 || Exp > 4) + return -1; + Exp = ((Exp+3) & 0x7) ^ 4; + + return ((int)Sign << 7) | (Exp << 4) | Mantissa; + } + + static inline int getFP32Imm(const APFloat &FPImm) { + return getFP32Imm(FPImm.bitcastToAPInt()); + } + + /// getFP64Imm - Return an 8-bit floating-point version of the 64-bit + /// floating-point value. If the value cannot be represented as an 8-bit + /// floating-point value, then return -1. 
+ static inline int getFP64Imm(const APInt &Imm) { + uint64_t Sign = Imm.lshr(63).getZExtValue() & 1; + int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023 + uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffULL; + + // We can handle 4 bits of mantissa. + // mantissa = (16+UInt(e:f:g:h))/16. + if (Mantissa & 0xffffffffffffULL) + return -1; + Mantissa >>= 48; + if ((Mantissa & 0xf) != Mantissa) + return -1; + + // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3 + if (Exp < -3 || Exp > 4) + return -1; + Exp = ((Exp+3) & 0x7) ^ 4; + + return ((int)Sign << 7) | (Exp << 4) | Mantissa; + } + + static inline int getFP64Imm(const APFloat &FPImm) { + return getFP64Imm(FPImm.bitcastToAPInt()); + } + +} // end namespace ARM_AM +} // end namespace llvm + +#endif + diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp new file mode 100644 index 0000000..c31c5e6 --- /dev/null +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp @@ -0,0 +1,531 @@ +//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/ARMMCTargetDesc.h" +#include "MCTargetDesc/ARMBaseInfo.h" +#include "MCTargetDesc/ARMFixupKinds.h" +#include "MCTargetDesc/ARMAddressingModes.h" +#include "llvm/ADT/Twine.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCDirectives.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCMachObjectWriter.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSectionELF.h" +#include "llvm/MC/MCSectionMachO.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Object/MachOFormat.h" +#include "llvm/Support/ELF.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +namespace { +class ARMELFObjectWriter : public MCELFObjectTargetWriter { +public: + ARMELFObjectWriter(Triple::OSType OSType) + : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSType, ELF::EM_ARM, + /*HasRelocationAddend*/ false) {} +}; + +class ARMAsmBackend : public MCAsmBackend { + const MCSubtargetInfo* STI; + bool isThumbMode; // Currently emitting Thumb code. +public: + ARMAsmBackend(const Target &T, const StringRef TT) + : MCAsmBackend(), STI(ARM_MC::createARMMCSubtargetInfo(TT, "", "")), + isThumbMode(TT.startswith("thumb")) {} + + ~ARMAsmBackend() { + delete STI; + } + + unsigned getNumFixupKinds() const { return ARM::NumTargetFixupKinds; } + + bool hasNOP() const { + return (STI->getFeatureBits() & ARM::HasV6T2Ops) != 0; + } + + const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const { + const static MCFixupKindInfo Infos[ARM::NumTargetFixupKinds] = { +// This table *must* be in the order that the fixup_* kinds are defined in +// ARMFixupKinds.h. 
+// +// Name Offset (bits) Size (bits) Flags +{ "fixup_arm_ldst_pcrel_12", 1, 24, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_t2_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel | + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, +{ "fixup_arm_pcrel_10", 1, 24, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_t2_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel | + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, +{ "fixup_thumb_adr_pcrel_10",0, 8, MCFixupKindInfo::FKF_IsPCRel | + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, +{ "fixup_arm_adr_pcrel_12", 1, 24, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_t2_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel | + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, +{ "fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_arm_thumb_cp", 0, 8, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel }, +// movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16 - 19. 
+{ "fixup_arm_movt_hi16", 0, 20, 0 }, +{ "fixup_arm_movw_lo16", 0, 20, 0 }, +{ "fixup_t2_movt_hi16", 0, 20, 0 }, +{ "fixup_t2_movw_lo16", 0, 20, 0 }, +{ "fixup_arm_movt_hi16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_arm_movw_lo16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_t2_movt_hi16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_t2_movw_lo16_pcrel", 0, 20, MCFixupKindInfo::FKF_IsPCRel }, + }; + + if (Kind < FirstTargetFixupKind) + return MCAsmBackend::getFixupKindInfo(Kind); + + assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && + "Invalid kind!"); + return Infos[Kind - FirstTargetFixupKind]; + } + + bool MayNeedRelaxation(const MCInst &Inst) const; + + void RelaxInstruction(const MCInst &Inst, MCInst &Res) const; + + bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const; + + void HandleAssemblerFlag(MCAssemblerFlag Flag) { + switch (Flag) { + default: break; + case MCAF_Code16: + setIsThumb(true); + break; + case MCAF_Code32: + setIsThumb(false); + break; + } + } + + unsigned getPointerSize() const { return 4; } + bool isThumb() const { return isThumbMode; } + void setIsThumb(bool it) { isThumbMode = it; } +}; +} // end anonymous namespace + +bool ARMAsmBackend::MayNeedRelaxation(const MCInst &Inst) const { + // FIXME: Thumb targets, different move constant targets.. + return false; +} + +void ARMAsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const { + assert(0 && "ARMAsmBackend::RelaxInstruction() unimplemented"); + return; +} + +bool ARMAsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const { + const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8 + const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP + const uint32_t ARMv4_NopEncoding = 0xe1a0000; // using MOV r0,r0 + const uint32_t ARMv6T2_NopEncoding = 0xe3207800; // NOP + if (isThumb()) { + const uint16_t nopEncoding = hasNOP() ? 
Thumb2_16bitNopEncoding + : Thumb1_16bitNopEncoding; + uint64_t NumNops = Count / 2; + for (uint64_t i = 0; i != NumNops; ++i) + OW->Write16(nopEncoding); + if (Count & 1) + OW->Write8(0); + return true; + } + // ARM mode + const uint32_t nopEncoding = hasNOP() ? ARMv6T2_NopEncoding + : ARMv4_NopEncoding; + uint64_t NumNops = Count / 4; + for (uint64_t i = 0; i != NumNops; ++i) + OW->Write32(nopEncoding); + // FIXME: should this function return false when unable to write exactly + // 'Count' bytes with NOP encodings? + switch (Count % 4) { + default: break; // No leftover bytes to write + case 1: OW->Write8(0); break; + case 2: OW->Write16(0); break; + case 3: OW->Write16(0); OW->Write8(0xa0); break; + } + + return true; +} + +static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) { + switch (Kind) { + default: + llvm_unreachable("Unknown fixup kind!"); + case FK_Data_1: + case FK_Data_2: + case FK_Data_4: + return Value; + case ARM::fixup_arm_movt_hi16: + Value >>= 16; + // Fallthrough + case ARM::fixup_arm_movw_lo16: + case ARM::fixup_arm_movt_hi16_pcrel: + case ARM::fixup_arm_movw_lo16_pcrel: { + unsigned Hi4 = (Value & 0xF000) >> 12; + unsigned Lo12 = Value & 0x0FFF; + // inst{19-16} = Hi4; + // inst{11-0} = Lo12; + Value = (Hi4 << 16) | (Lo12); + return Value; + } + case ARM::fixup_t2_movt_hi16: + Value >>= 16; + // Fallthrough + case ARM::fixup_t2_movw_lo16: + case ARM::fixup_t2_movt_hi16_pcrel: //FIXME: Shouldn't this be shifted like + // the other hi16 fixup? 
+ case ARM::fixup_t2_movw_lo16_pcrel: { + unsigned Hi4 = (Value & 0xF000) >> 12; + unsigned i = (Value & 0x800) >> 11; + unsigned Mid3 = (Value & 0x700) >> 8; + unsigned Lo8 = Value & 0x0FF; + // inst{19-16} = Hi4; + // inst{26} = i; + // inst{14-12} = Mid3; + // inst{7-0} = Lo8; + Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8); + uint64_t swapped = (Value & 0xFFFF0000) >> 16; + swapped |= (Value & 0x0000FFFF) << 16; + return swapped; + } + case ARM::fixup_arm_ldst_pcrel_12: + // ARM PC-relative values are offset by 8. + Value -= 4; + // FALLTHROUGH + case ARM::fixup_t2_ldst_pcrel_12: { + // Offset by 4, adjusted by two due to the half-word ordering of thumb. + Value -= 4; + bool isAdd = true; + if ((int64_t)Value < 0) { + Value = -Value; + isAdd = false; + } + assert ((Value < 4096) && "Out of range pc-relative fixup value!"); + Value |= isAdd << 23; + + // Same addressing mode as fixup_arm_pcrel_10, + // but with 16-bit halfwords swapped. + if (Kind == ARM::fixup_t2_ldst_pcrel_12) { + uint64_t swapped = (Value & 0xFFFF0000) >> 16; + swapped |= (Value & 0x0000FFFF) << 16; + return swapped; + } + + return Value; + } + case ARM::fixup_thumb_adr_pcrel_10: + return ((Value - 4) >> 2) & 0xff; + case ARM::fixup_arm_adr_pcrel_12: { + // ARM PC-relative values are offset by 8. + Value -= 8; + unsigned opc = 4; // bits {24-21}. Default to add: 0b0100 + if ((int64_t)Value < 0) { + Value = -Value; + opc = 2; // 0b0010 + } + assert(ARM_AM::getSOImmVal(Value) != -1 && + "Out of range pc-relative fixup value!"); + // Encode the immediate and shift the opcode into place. 
+ return ARM_AM::getSOImmVal(Value) | (opc << 21); + } + + case ARM::fixup_t2_adr_pcrel_12: { + Value -= 4; + unsigned opc = 0; + if ((int64_t)Value < 0) { + Value = -Value; + opc = 5; + } + + uint32_t out = (opc << 21); + out |= (Value & 0x800) << 15; + out |= (Value & 0x700) << 4; + out |= (Value & 0x0FF); + + uint64_t swapped = (out & 0xFFFF0000) >> 16; + swapped |= (out & 0x0000FFFF) << 16; + return swapped; + } + + case ARM::fixup_arm_condbranch: + case ARM::fixup_arm_uncondbranch: + // These values don't encode the low two bits since they're always zero. + // Offset by 8 just as above. + return 0xffffff & ((Value - 8) >> 2); + case ARM::fixup_t2_uncondbranch: { + Value = Value - 4; + Value >>= 1; // Low bit is not encoded. + + uint32_t out = 0; + bool I = Value & 0x800000; + bool J1 = Value & 0x400000; + bool J2 = Value & 0x200000; + J1 ^= I; + J2 ^= I; + + out |= I << 26; // S bit + out |= !J1 << 13; // J1 bit + out |= !J2 << 11; // J2 bit + out |= (Value & 0x1FF800) << 5; // imm6 field + out |= (Value & 0x0007FF); // imm11 field + + uint64_t swapped = (out & 0xFFFF0000) >> 16; + swapped |= (out & 0x0000FFFF) << 16; + return swapped; + } + case ARM::fixup_t2_condbranch: { + Value = Value - 4; + Value >>= 1; // Low bit is not encoded. + + uint64_t out = 0; + out |= (Value & 0x80000) << 7; // S bit + out |= (Value & 0x40000) >> 7; // J2 bit + out |= (Value & 0x20000) >> 4; // J1 bit + out |= (Value & 0x1F800) << 5; // imm6 field + out |= (Value & 0x007FF); // imm11 field + + uint32_t swapped = (out & 0xFFFF0000) >> 16; + swapped |= (out & 0x0000FFFF) << 16; + return swapped; + } + case ARM::fixup_arm_thumb_bl: { + // The value doesn't encode the low bit (always zero) and is offset by + // four. The value is encoded into disjoint bit positions in the destination + // opcode. 
x = unchanged, I = immediate value bit, S = sign extension bit + // + // BL: xxxxxSIIIIIIIIII xxxxxIIIIIIIIIII + // + // Note that the halfwords are stored high first, low second; so we need + // to transpose the fixup value here to map properly. + unsigned isNeg = (int64_t(Value - 4) < 0) ? 1 : 0; + uint32_t Binary = 0; + Value = 0x3fffff & ((Value - 4) >> 1); + Binary = (Value & 0x7ff) << 16; // Low imm11 value. + Binary |= (Value & 0x1ffc00) >> 11; // High imm10 value. + Binary |= isNeg << 10; // Sign bit. + return Binary; + } + case ARM::fixup_arm_thumb_blx: { + // The value doesn't encode the low two bits (always zero) and is offset by + // four (see fixup_arm_thumb_cp). The value is encoded into disjoint bit + // positions in the destination opcode. x = unchanged, I = immediate value + // bit, S = sign extension bit, 0 = zero. + // + // BLX: xxxxxSIIIIIIIIII xxxxxIIIIIIIIII0 + // + // Note that the halfwords are stored high first, low second; so we need + // to transpose the fixup value here to map properly. + unsigned isNeg = (int64_t(Value-4) < 0) ? 1 : 0; + uint32_t Binary = 0; + Value = 0xfffff & ((Value - 2) >> 2); + Binary = (Value & 0x3ff) << 17; // Low imm10L value. + Binary |= (Value & 0xffc00) >> 10; // High imm10H value. + Binary |= isNeg << 10; // Sign bit. + return Binary; + } + case ARM::fixup_arm_thumb_cp: + // Offset by 4, and don't encode the low two bits. Two bytes of that + // 'off by 4' is implicitly handled by the half-word ordering of the + // Thumb encoding, so we only need to adjust by 2 here. + return ((Value - 2) >> 2) & 0xff; + case ARM::fixup_arm_thumb_cb: { + // Offset by 4 and don't encode the lower bit, which is always 0. + uint32_t Binary = (Value - 4) >> 1; + return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3); + } + case ARM::fixup_arm_thumb_br: + // Offset by 4 and don't encode the lower bit, which is always 0. 
+ return ((Value - 4) >> 1) & 0x7ff; + case ARM::fixup_arm_thumb_bcc: + // Offset by 4 and don't encode the lower bit, which is always 0. + return ((Value - 4) >> 1) & 0xff; + case ARM::fixup_arm_pcrel_10: + Value = Value - 4; // ARM fixups offset by an additional word and don't + // need to adjust for the half-word ordering. + // Fall through. + case ARM::fixup_t2_pcrel_10: { + // Offset by 4, adjusted by two due to the half-word ordering of thumb. + Value = Value - 4; + bool isAdd = true; + if ((int64_t)Value < 0) { + Value = -Value; + isAdd = false; + } + // These values don't encode the low two bits since they're always zero. + Value >>= 2; + assert ((Value < 256) && "Out of range pc-relative fixup value!"); + Value |= isAdd << 23; + + // Same addressing mode as fixup_arm_pcrel_10, + // but with 16-bit halfwords swapped. + if (Kind == ARM::fixup_t2_pcrel_10) { + uint32_t swapped = (Value & 0xFFFF0000) >> 16; + swapped |= (Value & 0x0000FFFF) << 16; + return swapped; + } + + return Value; + } + } +} + +namespace { + +// FIXME: This should be in a separate file. +// ELF is an ELF of course... +class ELFARMAsmBackend : public ARMAsmBackend { +public: + Triple::OSType OSType; + ELFARMAsmBackend(const Target &T, const StringRef TT, + Triple::OSType _OSType) + : ARMAsmBackend(T, TT), OSType(_OSType) { } + + void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, + uint64_t Value) const; + + MCObjectWriter *createObjectWriter(raw_ostream &OS) const { + return createELFObjectWriter(new ARMELFObjectWriter(OSType), OS, + /*IsLittleEndian*/ true); + } +}; + +// FIXME: Raise this to share code between Darwin and ELF. +void ELFARMAsmBackend::ApplyFixup(const MCFixup &Fixup, char *Data, + unsigned DataSize, uint64_t Value) const { + unsigned NumBytes = 4; // FIXME: 2 for Thumb + Value = adjustFixupValue(Fixup.getKind(), Value); + if (!Value) return; // Doesn't change encoding. 
+ + unsigned Offset = Fixup.getOffset(); + + // For each byte of the fragment that the fixup touches, mask in the bits from + // the fixup value. The Value has been "split up" into the appropriate + // bitfields above. + for (unsigned i = 0; i != NumBytes; ++i) + Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); +} + +// FIXME: This should be in a separate file. +class DarwinARMAsmBackend : public ARMAsmBackend { +public: + const object::mach::CPUSubtypeARM Subtype; + DarwinARMAsmBackend(const Target &T, const StringRef TT, + object::mach::CPUSubtypeARM st) + : ARMAsmBackend(T, TT), Subtype(st) { } + + MCObjectWriter *createObjectWriter(raw_ostream &OS) const { + return createARMMachObjectWriter(OS, /*Is64Bit=*/false, + object::mach::CTM_ARM, + Subtype); + } + + void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, + uint64_t Value) const; + + virtual bool doesSectionRequireSymbols(const MCSection &Section) const { + return false; + } +}; + +/// getFixupKindNumBytes - The number of bytes the fixup may change. 
+static unsigned getFixupKindNumBytes(unsigned Kind) { + switch (Kind) { + default: + llvm_unreachable("Unknown fixup kind!"); + + case FK_Data_1: + case ARM::fixup_arm_thumb_bcc: + case ARM::fixup_arm_thumb_cp: + case ARM::fixup_thumb_adr_pcrel_10: + return 1; + + case FK_Data_2: + case ARM::fixup_arm_thumb_br: + case ARM::fixup_arm_thumb_cb: + return 2; + + case ARM::fixup_arm_ldst_pcrel_12: + case ARM::fixup_arm_pcrel_10: + case ARM::fixup_arm_adr_pcrel_12: + case ARM::fixup_arm_condbranch: + case ARM::fixup_arm_uncondbranch: + return 3; + + case FK_Data_4: + case ARM::fixup_t2_ldst_pcrel_12: + case ARM::fixup_t2_condbranch: + case ARM::fixup_t2_uncondbranch: + case ARM::fixup_t2_pcrel_10: + case ARM::fixup_t2_adr_pcrel_12: + case ARM::fixup_arm_thumb_bl: + case ARM::fixup_arm_thumb_blx: + case ARM::fixup_arm_movt_hi16: + case ARM::fixup_arm_movw_lo16: + case ARM::fixup_arm_movt_hi16_pcrel: + case ARM::fixup_arm_movw_lo16_pcrel: + case ARM::fixup_t2_movt_hi16: + case ARM::fixup_t2_movw_lo16: + case ARM::fixup_t2_movt_hi16_pcrel: + case ARM::fixup_t2_movw_lo16_pcrel: + return 4; + } +} + +void DarwinARMAsmBackend::ApplyFixup(const MCFixup &Fixup, char *Data, + unsigned DataSize, uint64_t Value) const { + unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind()); + Value = adjustFixupValue(Fixup.getKind(), Value); + if (!Value) return; // Doesn't change encoding. + + unsigned Offset = Fixup.getOffset(); + assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!"); + + // For each byte of the fragment that the fixup touches, mask in the + // bits from the fixup value. 
+ for (unsigned i = 0; i != NumBytes; ++i) + Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); +} + +} // end anonymous namespace + +MCAsmBackend *llvm::createARMAsmBackend(const Target &T, StringRef TT) { + Triple TheTriple(TT); + + if (TheTriple.isOSDarwin()) { + if (TheTriple.getArchName() == "armv4t" || + TheTriple.getArchName() == "thumbv4t") + return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V4T); + else if (TheTriple.getArchName() == "armv5e" || + TheTriple.getArchName() == "thumbv5e") + return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V5TEJ); + else if (TheTriple.getArchName() == "armv6" || + TheTriple.getArchName() == "thumbv6") + return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V6); + return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7); + } + + if (TheTriple.isOSWindows()) + assert(0 && "Windows not supported on ARM"); + + return new ELFARMAsmBackend(T, TT, Triple(TT).getOS()); +} diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h new file mode 100644 index 0000000..ec4b6ff --- /dev/null +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h @@ -0,0 +1,449 @@ +//===-- ARMBaseInfo.h - Top level definitions for ARM -------- --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains small standalone helper functions and enum definitions for +// the ARM target useful for the compiler back-end and the MC libraries. +// As such, it deliberately does not include references to LLVM core +// code gen types, passes, etc.. 
+// +//===----------------------------------------------------------------------===// + +#ifndef ARMBASEINFO_H +#define ARMBASEINFO_H + +#include "ARMMCTargetDesc.h" +#include "llvm/Support/ErrorHandling.h" + +namespace llvm { + +// Enums corresponding to ARM condition codes +namespace ARMCC { + // The CondCodes constants map directly to the 4-bit encoding of the + // condition field for predicated instructions. + enum CondCodes { // Meaning (integer) Meaning (floating-point) + EQ, // Equal Equal + NE, // Not equal Not equal, or unordered + HS, // Carry set >, ==, or unordered + LO, // Carry clear Less than + MI, // Minus, negative Less than + PL, // Plus, positive or zero >, ==, or unordered + VS, // Overflow Unordered + VC, // No overflow Not unordered + HI, // Unsigned higher Greater than, or unordered + LS, // Unsigned lower or same Less than or equal + GE, // Greater than or equal Greater than or equal + LT, // Less than Less than, or unordered + GT, // Greater than Greater than + LE, // Less than or equal <, ==, or unordered + AL // Always (unconditional) Always (unconditional) + }; + + inline static CondCodes getOppositeCondition(CondCodes CC) { + switch (CC) { + default: llvm_unreachable("Unknown condition code"); + case EQ: return NE; + case NE: return EQ; + case HS: return LO; + case LO: return HS; + case MI: return PL; + case PL: return MI; + case VS: return VC; + case VC: return VS; + case HI: return LS; + case LS: return HI; + case GE: return LT; + case LT: return GE; + case GT: return LE; + case LE: return GT; + } + } +} // namespace ARMCC + +inline static const char *ARMCondCodeToString(ARMCC::CondCodes CC) { + switch (CC) { + default: llvm_unreachable("Unknown condition code"); + case ARMCC::EQ: return "eq"; + case ARMCC::NE: return "ne"; + case ARMCC::HS: return "hs"; + case ARMCC::LO: return "lo"; + case ARMCC::MI: return "mi"; + case ARMCC::PL: return "pl"; + case ARMCC::VS: return "vs"; + case ARMCC::VC: return "vc"; + case ARMCC::HI: return 
"hi"; + case ARMCC::LS: return "ls"; + case ARMCC::GE: return "ge"; + case ARMCC::LT: return "lt"; + case ARMCC::GT: return "gt"; + case ARMCC::LE: return "le"; + case ARMCC::AL: return "al"; + } +} + +namespace ARM_PROC { + enum IMod { + IE = 2, + ID = 3 + }; + + enum IFlags { + F = 1, + I = 2, + A = 4 + }; + + inline static const char *IFlagsToString(unsigned val) { + switch (val) { + default: llvm_unreachable("Unknown iflags operand"); + case F: return "f"; + case I: return "i"; + case A: return "a"; + } + } + + inline static const char *IModToString(unsigned val) { + switch (val) { + default: llvm_unreachable("Unknown imod operand"); + case IE: return "ie"; + case ID: return "id"; + } + } +} + +namespace ARM_MB { + // The Memory Barrier Option constants map directly to the 4-bit encoding of + // the option field for memory barrier operations. + enum MemBOpt { + SY = 15, + ST = 14, + ISH = 11, + ISHST = 10, + NSH = 7, + NSHST = 6, + OSH = 3, + OSHST = 2 + }; + + inline static const char *MemBOptToString(unsigned val) { + switch (val) { + default: llvm_unreachable("Unknown memory operation"); + case SY: return "sy"; + case ST: return "st"; + case ISH: return "ish"; + case ISHST: return "ishst"; + case NSH: return "nsh"; + case NSHST: return "nshst"; + case OSH: return "osh"; + case OSHST: return "oshst"; + } + } +} // namespace ARM_MB + +/// getARMRegisterNumbering - Given the enum value for some register, e.g. +/// ARM::LR, return the number that it corresponds to (e.g. 14). 
+inline static unsigned getARMRegisterNumbering(unsigned Reg) { + using namespace ARM; + switch (Reg) { + default: + llvm_unreachable("Unknown ARM register!"); + case R0: case S0: case D0: case Q0: return 0; + case R1: case S1: case D1: case Q1: return 1; + case R2: case S2: case D2: case Q2: return 2; + case R3: case S3: case D3: case Q3: return 3; + case R4: case S4: case D4: case Q4: return 4; + case R5: case S5: case D5: case Q5: return 5; + case R6: case S6: case D6: case Q6: return 6; + case R7: case S7: case D7: case Q7: return 7; + case R8: case S8: case D8: case Q8: return 8; + case R9: case S9: case D9: case Q9: return 9; + case R10: case S10: case D10: case Q10: return 10; + case R11: case S11: case D11: case Q11: return 11; + case R12: case S12: case D12: case Q12: return 12; + case SP: case S13: case D13: case Q13: return 13; + case LR: case S14: case D14: case Q14: return 14; + case PC: case S15: case D15: case Q15: return 15; + + case S16: case D16: return 16; + case S17: case D17: return 17; + case S18: case D18: return 18; + case S19: case D19: return 19; + case S20: case D20: return 20; + case S21: case D21: return 21; + case S22: case D22: return 22; + case S23: case D23: return 23; + case S24: case D24: return 24; + case S25: case D25: return 25; + case S26: case D26: return 26; + case S27: case D27: return 27; + case S28: case D28: return 28; + case S29: case D29: return 29; + case S30: case D30: return 30; + case S31: case D31: return 31; + } +} + +/// isARMLowRegister - Returns true if the register is a low register (r0-r7). +/// +static inline bool isARMLowRegister(unsigned Reg) { + using namespace ARM; + switch (Reg) { + case R0: case R1: case R2: case R3: + case R4: case R5: case R6: case R7: + return true; + default: + return false; + } +} + +/// ARMII - This namespace holds all of the target specific flags that +/// instruction info tracks. 
+/// +namespace ARMII { + + /// ARM Index Modes + enum IndexMode { + IndexModeNone = 0, + IndexModePre = 1, + IndexModePost = 2, + IndexModeUpd = 3 + }; + + /// ARM Addressing Modes + enum AddrMode { + AddrModeNone = 0, + AddrMode1 = 1, + AddrMode2 = 2, + AddrMode3 = 3, + AddrMode4 = 4, + AddrMode5 = 5, + AddrMode6 = 6, + AddrModeT1_1 = 7, + AddrModeT1_2 = 8, + AddrModeT1_4 = 9, + AddrModeT1_s = 10, // i8 * 4 for pc and sp relative data + AddrModeT2_i12 = 11, + AddrModeT2_i8 = 12, + AddrModeT2_so = 13, + AddrModeT2_pc = 14, // +/- i12 for pc relative data + AddrModeT2_i8s4 = 15, // i8 * 4 + AddrMode_i12 = 16 + }; + + inline static const char *AddrModeToString(AddrMode addrmode) { + switch (addrmode) { + default: llvm_unreachable("Unknown memory operation"); + case AddrModeNone: return "AddrModeNone"; + case AddrMode1: return "AddrMode1"; + case AddrMode2: return "AddrMode2"; + case AddrMode3: return "AddrMode3"; + case AddrMode4: return "AddrMode4"; + case AddrMode5: return "AddrMode5"; + case AddrMode6: return "AddrMode6"; + case AddrModeT1_1: return "AddrModeT1_1"; + case AddrModeT1_2: return "AddrModeT1_2"; + case AddrModeT1_4: return "AddrModeT1_4"; + case AddrModeT1_s: return "AddrModeT1_s"; + case AddrModeT2_i12: return "AddrModeT2_i12"; + case AddrModeT2_i8: return "AddrModeT2_i8"; + case AddrModeT2_so: return "AddrModeT2_so"; + case AddrModeT2_pc: return "AddrModeT2_pc"; + case AddrModeT2_i8s4: return "AddrModeT2_i8s4"; + case AddrMode_i12: return "AddrMode_i12"; + } + } + + /// Target Operand Flag enum. + enum TOF { + //===------------------------------------------------------------------===// + // ARM Specific MachineOperand flags. + + MO_NO_FLAG, + + /// MO_LO16 - On a symbol operand, this represents a relocation containing + /// lower 16 bit of the address. Used only via movw instruction. + MO_LO16, + + /// MO_HI16 - On a symbol operand, this represents a relocation containing + /// higher 16 bit of the address. Used only via movt instruction. 
+ MO_HI16, + + /// MO_LO16_NONLAZY - On a symbol operand "FOO", this represents a + /// relocation containing lower 16 bit of the non-lazy-ptr indirect symbol, + /// i.e. "FOO$non_lazy_ptr". + /// Used only via movw instruction. + MO_LO16_NONLAZY, + + /// MO_HI16_NONLAZY - On a symbol operand "FOO", this represents a + /// relocation containing lower 16 bit of the non-lazy-ptr indirect symbol, + /// i.e. "FOO$non_lazy_ptr". Used only via movt instruction. + MO_HI16_NONLAZY, + + /// MO_LO16_NONLAZY_PIC - On a symbol operand "FOO", this represents a + /// relocation containing lower 16 bit of the PC relative address of the + /// non-lazy-ptr indirect symbol, i.e. "FOO$non_lazy_ptr - LABEL". + /// Used only via movw instruction. + MO_LO16_NONLAZY_PIC, + + /// MO_HI16_NONLAZY_PIC - On a symbol operand "FOO", this represents a + /// relocation containing lower 16 bit of the PC relative address of the + /// non-lazy-ptr indirect symbol, i.e. "FOO$non_lazy_ptr - LABEL". + /// Used only via movt instruction. + MO_HI16_NONLAZY_PIC, + + /// MO_PLT - On a symbol operand, this represents an ELF PLT reference on a + /// call operand. + MO_PLT + }; + + enum { + //===------------------------------------------------------------------===// + // Instruction Flags. + + //===------------------------------------------------------------------===// + // This four-bit field describes the addressing mode used. + AddrModeMask = 0x1f, // The AddrMode enums are declared in ARMBaseInfo.h + + // IndexMode - Unindex, pre-indexed, or post-indexed are valid for load + // and store ops only. Generic "updating" flag is used for ld/st multiple. + // The index mode enums are declared in ARMBaseInfo.h + IndexModeShift = 5, + IndexModeMask = 3 << IndexModeShift, + + //===------------------------------------------------------------------===// + // Instruction encoding formats. 
+  //
+  FormShift = 7,
+  FormMask = 0x3f << FormShift,
+
+  // Pseudo instructions
+  Pseudo = 0 << FormShift,
+
+  // Multiply instructions
+  MulFrm = 1 << FormShift,
+
+  // Branch instructions
+  BrFrm = 2 << FormShift,
+  BrMiscFrm = 3 << FormShift,
+
+  // Data Processing instructions
+  DPFrm = 4 << FormShift,
+  DPSoRegFrm = 5 << FormShift,
+
+  // Load and Store
+  LdFrm = 6 << FormShift,
+  StFrm = 7 << FormShift,
+  LdMiscFrm = 8 << FormShift,
+  StMiscFrm = 9 << FormShift,
+  LdStMulFrm = 10 << FormShift,
+
+  LdStExFrm = 11 << FormShift,
+
+  // Miscellaneous arithmetic instructions
+  ArithMiscFrm = 12 << FormShift,
+  SatFrm = 13 << FormShift,
+
+  // Extend instructions
+  ExtFrm = 14 << FormShift,
+
+  // VFP formats
+  VFPUnaryFrm = 15 << FormShift,
+  VFPBinaryFrm = 16 << FormShift,
+  VFPConv1Frm = 17 << FormShift,
+  VFPConv2Frm = 18 << FormShift,
+  VFPConv3Frm = 19 << FormShift,
+  VFPConv4Frm = 20 << FormShift,
+  VFPConv5Frm = 21 << FormShift,
+  VFPLdStFrm = 22 << FormShift,
+  VFPLdStMulFrm = 23 << FormShift,
+  VFPMiscFrm = 24 << FormShift,
+
+  // Thumb format
+  ThumbFrm = 25 << FormShift,
+
+  // Miscellaneous format
+  MiscFrm = 26 << FormShift,
+
+  // NEON formats
+  NGetLnFrm = 27 << FormShift,
+  NSetLnFrm = 28 << FormShift,
+  NDupFrm = 29 << FormShift,
+  NLdStFrm = 30 << FormShift,
+  N1RegModImmFrm= 31 << FormShift,
+  N2RegFrm = 32 << FormShift,
+  NVCVTFrm = 33 << FormShift,
+  NVDupLnFrm = 34 << FormShift,
+  N2RegVShLFrm = 35 << FormShift,
+  N2RegVShRFrm = 36 << FormShift,
+  N3RegFrm = 37 << FormShift,
+  N3RegVShFrm = 38 << FormShift,
+  NVExtFrm = 39 << FormShift,
+  NVMulSLFrm = 40 << FormShift,
+  NVTBLFrm = 41 << FormShift,
+
+  //===------------------------------------------------------------------===//
+  // Misc flags.
+
+  // UnaryDP - Indicates this is a unary data processing instruction, i.e.
+  // it doesn't have a Rn operand.
+ UnaryDP = 1 << 13, + + // Xform16Bit - Indicates this Thumb2 instruction may be transformed into + // a 16-bit Thumb instruction if certain conditions are met. + Xform16Bit = 1 << 14, + + // ThumbArithFlagSetting - The instruction is a 16-bit flag setting Thumb + // instruction. Used by the parser to determine whether to require the 'S' + // suffix on the mnemonic (when not in an IT block) or preclude it (when + // in an IT block). + ThumbArithFlagSetting = 1 << 18, + + //===------------------------------------------------------------------===// + // Code domain. + DomainShift = 15, + DomainMask = 7 << DomainShift, + DomainGeneral = 0 << DomainShift, + DomainVFP = 1 << DomainShift, + DomainNEON = 2 << DomainShift, + DomainNEONA8 = 4 << DomainShift, + + //===------------------------------------------------------------------===// + // Field shifts - such shifts are used to set field while generating + // machine instructions. + // + // FIXME: This list will need adjusting/fixing as the MC code emitter + // takes shape and the ARMCodeEmitter.cpp bits go away. 
+ ShiftTypeShift = 4, + + M_BitShift = 5, + ShiftImmShift = 5, + ShiftShift = 7, + N_BitShift = 7, + ImmHiShift = 8, + SoRotImmShift = 8, + RegRsShift = 8, + ExtRotImmShift = 10, + RegRdLoShift = 12, + RegRdShift = 12, + RegRdHiShift = 16, + RegRnShift = 16, + S_BitShift = 20, + W_BitShift = 21, + AM3_I_BitShift = 22, + D_BitShift = 22, + U_BitShift = 23, + P_BitShift = 24, + I_BitShift = 25, + CondShift = 28 + }; + +} // end namespace ARMII + +} // end namespace llvm; + +#endif diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h new file mode 100644 index 0000000..350c92d --- /dev/null +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h @@ -0,0 +1,97 @@ +//===-- ARM/ARMFixupKinds.h - ARM Specific Fixup Entries --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ARM_ARMFIXUPKINDS_H +#define LLVM_ARM_ARMFIXUPKINDS_H + +#include "llvm/MC/MCFixup.h" + +namespace llvm { +namespace ARM { +enum Fixups { + // fixup_arm_ldst_pcrel_12 - 12-bit PC relative relocation for symbol + // addresses + fixup_arm_ldst_pcrel_12 = FirstTargetFixupKind, + + // fixup_t2_ldst_pcrel_12 - Equivalent to fixup_arm_ldst_pcrel_12, with + // the 16-bit halfwords reordered. + fixup_t2_ldst_pcrel_12, + + // fixup_arm_pcrel_10 - 10-bit PC relative relocation for symbol addresses + // used in VFP instructions where the lower 2 bits are not encoded + // (so it's encoded as an 8-bit immediate). + fixup_arm_pcrel_10, + // fixup_t2_pcrel_10 - Equivalent to fixup_arm_pcrel_10, accounting for + // the short-swapped encoding of Thumb2 instructions. 
+  fixup_t2_pcrel_10,
+  // fixup_thumb_adr_pcrel_10 - 10-bit PC relative relocation for symbol
+  // addresses where the lower 2 bits are not encoded (so it's encoded as an
+  // 8-bit immediate).
+  fixup_thumb_adr_pcrel_10,
+  // fixup_arm_adr_pcrel_12 - 12-bit PC relative relocation for the ADR
+  // instruction.
+  fixup_arm_adr_pcrel_12,
+  // fixup_t2_adr_pcrel_12 - 12-bit PC relative relocation for the ADR
+  // instruction.
+  fixup_t2_adr_pcrel_12,
+  // fixup_arm_condbranch - 24-bit PC relative relocation for conditional branch
+  // instructions.
+  fixup_arm_condbranch,
+  // fixup_arm_uncondbranch - 24-bit PC relative relocation for unconditional
+  // branch instructions.
+  fixup_arm_uncondbranch,
+  // fixup_t2_condbranch - 20-bit PC relative relocation for Thumb2 direct
+  // conditional branch instructions.
+  fixup_t2_condbranch,
+  // fixup_t2_uncondbranch - 20-bit PC relative relocation for Thumb2 direct
+  // unconditional branch instructions.
+  fixup_t2_uncondbranch,
+
+  // fixup_arm_thumb_br - 12-bit fixup for Thumb B instructions.
+  fixup_arm_thumb_br,
+
+  // fixup_arm_thumb_bl - Fixup for Thumb BL instructions.
+  fixup_arm_thumb_bl,
+
+  // fixup_arm_thumb_blx - Fixup for Thumb BLX instructions.
+  fixup_arm_thumb_blx,
+
+  // fixup_arm_thumb_cb - Fixup for Thumb CB (compare-and-branch) instructions.
+  fixup_arm_thumb_cb,
+
+  // fixup_arm_thumb_cp - Fixup for Thumb load/store from constant pool instrs.
+  fixup_arm_thumb_cp,
+
+  // fixup_arm_thumb_bcc - Fixup for Thumb conditional branching instructions.
+  fixup_arm_thumb_bcc,
+
+  // The next two are for the movt/movw pair
+  // the 16bit imm field are split into imm{15-12} and imm{11-0}
+  fixup_arm_movt_hi16, // :upper16:
+  fixup_arm_movw_lo16, // :lower16:
+  fixup_t2_movt_hi16, // :upper16:
+  fixup_t2_movw_lo16, // :lower16:
+
+  // It is possible to create an "immediate" that happens to be pcrel.
+  // movw r0, :lower16:Foo-(Bar+8) and movt r0, :upper16:Foo-(Bar+8)
+  // result in different reloc tags than the above two.
+ // Needed to support ELF::R_ARM_MOVT_PREL and ELF::R_ARM_MOVW_PREL_NC + fixup_arm_movt_hi16_pcrel, // :upper16: + fixup_arm_movw_lo16_pcrel, // :lower16: + fixup_t2_movt_hi16_pcrel, // :upper16: + fixup_t2_movw_lo16_pcrel, // :lower16: + + // Marker + LastTargetFixupKind, + NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind +}; +} +} + +#endif diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp index 53b4c95..1c109e0 100644 --- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp @@ -52,6 +52,9 @@ ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin() { AsmTransCBE = arm_asm_table; Data64bitsDirective = 0; CommentString = "@"; + Code16Directive = ".code\t16"; + Code32Directive = ".code\t32"; + SupportsDebugInformation = true; // Exceptions handling @@ -64,12 +67,14 @@ ARMELFMCAsmInfo::ARMELFMCAsmInfo() { Data64bitsDirective = 0; CommentString = "@"; - - HasLEB128 = true; PrivateGlobalPrefix = ".L"; + Code16Directive = ".code\t16"; + Code32Directive = ".code\t32"; + WeakRefDirective = "\t.weak\t"; - HasLCOMMDirective = true; + LCOMMDirectiveType = LCOMM::NoAlignment; + HasLEB128 = true; SupportsDebugInformation = true; // Exceptions handling diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp new file mode 100644 index 0000000..865c3e2 --- /dev/null +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp @@ -0,0 +1,1468 @@ +//===-- ARM/ARMMCCodeEmitter.cpp - Convert ARM code to machine code -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the ARMMCCodeEmitter class. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mccodeemitter" +#include "MCTargetDesc/ARMAddressingModes.h" +#include "MCTargetDesc/ARMBaseInfo.h" +#include "MCTargetDesc/ARMFixupKinds.h" +#include "MCTargetDesc/ARMMCExpr.h" +#include "MCTargetDesc/ARMMCTargetDesc.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +STATISTIC(MCNumEmitted, "Number of MC instructions emitted."); +STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created."); + +namespace { +class ARMMCCodeEmitter : public MCCodeEmitter { + ARMMCCodeEmitter(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT + void operator=(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT + const MCInstrInfo &MCII; + const MCSubtargetInfo &STI; + +public: + ARMMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti, + MCContext &ctx) + : MCII(mcii), STI(sti) { + } + + ~ARMMCCodeEmitter() {} + + bool isThumb() const { + // FIXME: Can tablegen auto-generate this? + return (STI.getFeatureBits() & ARM::ModeThumb) != 0; + } + bool isThumb2() const { + return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) != 0; + } + bool isTargetDarwin() const { + Triple TT(STI.getTargetTriple()); + Triple::OSType OS = TT.getOS(); + return OS == Triple::Darwin || OS == Triple::MacOSX || OS == Triple::IOS; + } + + unsigned getMachineSoImmOpValue(unsigned SoImm) const; + + // getBinaryCodeForInstr - TableGen'erated function for getting the + // binary encoding for an instruction. + unsigned getBinaryCodeForInstr(const MCInst &MI, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getMachineOpValue - Return binary encoding of operand. 
If the machine + /// operand requires relocation, record the relocation and return zero. + unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getHiLo16ImmOpValue - Return the encoding for the hi / low 16-bit of + /// the specified operand. This is used for operands with :lower16: and + /// :upper16: prefixes. + uint32_t getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + bool EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, + unsigned &Reg, unsigned &Imm, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getThumbBLTargetOpValue - Return encoding info for Thumb immediate + /// BL branch target. + uint32_t getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate + /// BLX branch target. + uint32_t getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getThumbBRTargetOpValue - Return encoding info for Thumb branch target. + uint32_t getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target. + uint32_t getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getThumbCBTargetOpValue - Return encoding info for Thumb branch target. + uint32_t getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getBranchTargetOpValue - Return encoding info for 24-bit immediate + /// branch target. + uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getUnconditionalBranchTargetOpValue - Return encoding info for 24-bit + /// immediate Thumb2 direct branch target. 
+ uint32_t getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getARMBranchTargetOpValue - Return encoding info for 24-bit immediate + /// branch target. + uint32_t getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + uint32_t getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getAdrLabelOpValue - Return encoding info for 12-bit immediate + /// ADR label target. + uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + uint32_t getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + uint32_t getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + + /// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' + /// operand. + uint32_t getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getThumbAddrModeRegRegOpValue - Return encoding for 'reg + reg' operand. + uint32_t getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups)const; + + /// getT2AddrModeImm8s4OpValue - Return encoding info for 'reg +/- imm8<<2' + /// operand. + uint32_t getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getT2AddrModeImm0_1020s4OpValue - Return encoding info for 'reg + imm8<<2' + /// operand. + uint32_t getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getT2Imm8s4OpValue - Return encoding info for '+/- imm8<<2' + /// operand. 
+ uint32_t getT2Imm8s4OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + + /// getLdStSORegOpValue - Return encoding info for 'reg +/- reg shop imm' + /// operand as needed by load/store instructions. + uint32_t getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getLdStmModeOpValue - Return encoding for load/store multiple mode. + uint32_t getLdStmModeOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + ARM_AM::AMSubMode Mode = (ARM_AM::AMSubMode)MI.getOperand(OpIdx).getImm(); + switch (Mode) { + default: assert(0 && "Unknown addressing sub-mode!"); + case ARM_AM::da: return 0; + case ARM_AM::ia: return 1; + case ARM_AM::db: return 2; + case ARM_AM::ib: return 3; + } + } + /// getShiftOp - Return the shift opcode (bit[6:5]) of the immediate value. + /// + unsigned getShiftOp(ARM_AM::ShiftOpc ShOpc) const { + switch (ShOpc) { + default: llvm_unreachable("Unknown shift opc!"); + case ARM_AM::no_shift: + case ARM_AM::lsl: return 0; + case ARM_AM::lsr: return 1; + case ARM_AM::asr: return 2; + case ARM_AM::ror: + case ARM_AM::rrx: return 3; + } + return 0; + } + + /// getAddrMode2OpValue - Return encoding for addrmode2 operands. + uint32_t getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getAddrMode2OffsetOpValue - Return encoding for am2offset operands. + uint32_t getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getPostIdxRegOpValue - Return encoding for postidx_reg operands. + uint32_t getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getAddrMode3OffsetOpValue - Return encoding for am3offset operands. 
+ uint32_t getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getAddrMode3OpValue - Return encoding for addrmode3 operands. + uint32_t getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getAddrModeThumbSPOpValue - Return encoding info for 'reg +/- imm12' + /// operand. + uint32_t getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getAddrModeISOpValue - Encode the t_addrmode_is# operands. + uint32_t getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands. + uint32_t getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getAddrMode5OpValue - Return encoding info for 'reg +/- imm8' operand. + uint32_t getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getCCOutOpValue - Return encoding of the 's' bit. + unsigned getCCOutOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + // The operand is either reg0 or CPSR. The 's' bit is encoded as '0' or + // '1' respectively. + return MI.getOperand(Op).getReg() == ARM::CPSR; + } + + /// getSOImmOpValue - Return an encoded 12-bit shifted-immediate value. + unsigned getSOImmOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + unsigned SoImm = MI.getOperand(Op).getImm(); + int SoImmVal = ARM_AM::getSOImmVal(SoImm); + assert(SoImmVal != -1 && "Not a valid so_imm value!"); + + // Encode rotate_imm. + unsigned Binary = (ARM_AM::getSOImmValRot((unsigned)SoImmVal) >> 1) + << ARMII::SoRotImmShift; + + // Encode immed_8. + Binary |= ARM_AM::getSOImmValImm((unsigned)SoImmVal); + return Binary; + } + + /// getT2SOImmOpValue - Return an encoded 12-bit shifted-immediate value. 
+ unsigned getT2SOImmOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + unsigned SoImm = MI.getOperand(Op).getImm(); + unsigned Encoded = ARM_AM::getT2SOImmVal(SoImm); + assert(Encoded != ~0U && "Not a Thumb2 so_imm value?"); + return Encoded; + } + + unsigned getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getSORegOpValue - Return an encoded so_reg shifted register value. + unsigned getSORegRegOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getSORegImmOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getT2SORegOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned getNEONVcvtImm32OpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + return 64 - MI.getOperand(Op).getImm(); + } + + unsigned getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned getRegisterListOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned 
getShiftRight8Imm(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftRight16Imm(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftRight32Imm(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftRight64Imm(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned getThumbSRImmOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned NEONThumb2DataIPostEncoder(const MCInst &MI, + unsigned EncodedValue) const; + unsigned NEONThumb2LoadStorePostEncoder(const MCInst &MI, + unsigned EncodedValue) const; + unsigned NEONThumb2DupPostEncoder(const MCInst &MI, + unsigned EncodedValue) const; + + unsigned VFPThumb2PostEncoder(const MCInst &MI, + unsigned EncodedValue) const; + + void EmitByte(unsigned char C, raw_ostream &OS) const { + OS << (char)C; + } + + void EmitConstant(uint64_t Val, unsigned Size, raw_ostream &OS) const { + // Output the constant in little endian byte order. + for (unsigned i = 0; i != Size; ++i) { + EmitByte(Val & 255, OS); + Val >>= 8; + } + } + + void EncodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups) const; +}; + +} // end anonymous namespace + +MCCodeEmitter *llvm::createARMMCCodeEmitter(const MCInstrInfo &MCII, + const MCSubtargetInfo &STI, + MCContext &Ctx) { + return new ARMMCCodeEmitter(MCII, STI, Ctx); +} + +/// NEONThumb2DataIPostEncoder - Post-process encoded NEON data-processing +/// instructions, and rewrite them to their Thumb2 form if we are currently in +/// Thumb2 mode. +unsigned ARMMCCodeEmitter::NEONThumb2DataIPostEncoder(const MCInst &MI, + unsigned EncodedValue) const { + if (isThumb2()) { + // NEON Thumb2 data-processsing encodings are very simple: bit 24 is moved + // to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are + // set to 1111. 
+ unsigned Bit24 = EncodedValue & 0x01000000; + unsigned Bit28 = Bit24 << 4; + EncodedValue &= 0xEFFFFFFF; + EncodedValue |= Bit28; + EncodedValue |= 0x0F000000; + } + + return EncodedValue; +} + +/// NEONThumb2LoadStorePostEncoder - Post-process encoded NEON load/store +/// instructions, and rewrite them to their Thumb2 form if we are currently in +/// Thumb2 mode. +unsigned ARMMCCodeEmitter::NEONThumb2LoadStorePostEncoder(const MCInst &MI, + unsigned EncodedValue) const { + if (isThumb2()) { + EncodedValue &= 0xF0FFFFFF; + EncodedValue |= 0x09000000; + } + + return EncodedValue; +} + +/// NEONThumb2DupPostEncoder - Post-process encoded NEON vdup +/// instructions, and rewrite them to their Thumb2 form if we are currently in +/// Thumb2 mode. +unsigned ARMMCCodeEmitter::NEONThumb2DupPostEncoder(const MCInst &MI, + unsigned EncodedValue) const { + if (isThumb2()) { + EncodedValue &= 0x00FFFFFF; + EncodedValue |= 0xEE000000; + } + + return EncodedValue; +} + +/// VFPThumb2PostEncoder - Post-process encoded VFP instructions and rewrite +/// them to their Thumb2 form if we are currently in Thumb2 mode. +unsigned ARMMCCodeEmitter:: +VFPThumb2PostEncoder(const MCInst &MI, unsigned EncodedValue) const { + if (isThumb2()) { + EncodedValue &= 0x0FFFFFFF; + EncodedValue |= 0xE0000000; + } + return EncodedValue; +} + +/// getMachineOpValue - Return binary encoding of operand. If the machine +/// operand requires relocation, record the relocation and return zero. +unsigned ARMMCCodeEmitter:: +getMachineOpValue(const MCInst &MI, const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups) const { + if (MO.isReg()) { + unsigned Reg = MO.getReg(); + unsigned RegNo = getARMRegisterNumbering(Reg); + + // Q registers are encoded as 2x their register number. 
+ switch (Reg) { + default: + return RegNo; + case ARM::Q0: case ARM::Q1: case ARM::Q2: case ARM::Q3: + case ARM::Q4: case ARM::Q5: case ARM::Q6: case ARM::Q7: + case ARM::Q8: case ARM::Q9: case ARM::Q10: case ARM::Q11: + case ARM::Q12: case ARM::Q13: case ARM::Q14: case ARM::Q15: + return 2 * RegNo; + } + } else if (MO.isImm()) { + return static_cast<unsigned>(MO.getImm()); + } else if (MO.isFPImm()) { + return static_cast<unsigned>(APFloat(MO.getFPImm()) + .bitcastToAPInt().getHiBits(32).getLimitedValue()); + } + + llvm_unreachable("Unable to encode MCOperand!"); + return 0; +} + +/// getAddrModeImmOpValue - Return encoding info for 'reg +/- imm' operand. +bool ARMMCCodeEmitter:: +EncodeAddrModeOpValues(const MCInst &MI, unsigned OpIdx, unsigned &Reg, + unsigned &Imm, SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx + 1); + + Reg = getARMRegisterNumbering(MO.getReg()); + + int32_t SImm = MO1.getImm(); + bool isAdd = true; + + // Special value for #-0 + if (SImm == INT32_MIN) { + SImm = 0; + isAdd = false; + } + + // Immediate is always encoded as positive. The 'U' bit controls add vs sub. + if (SImm < 0) { + SImm = -SImm; + isAdd = false; + } + + Imm = SImm; + return isAdd; +} + +/// getBranchTargetOpValue - Helper function to get the branch target operand, +/// which is either an immediate or requires a fixup. +static uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + unsigned FixupKind, + SmallVectorImpl<MCFixup> &Fixups) { + const MCOperand &MO = MI.getOperand(OpIdx); + + // If the destination is an immediate, we have nothing to do. + if (MO.isImm()) return MO.getImm(); + assert(MO.isExpr() && "Unexpected branch target type!"); + const MCExpr *Expr = MO.getExpr(); + MCFixupKind Kind = MCFixupKind(FixupKind); + Fixups.push_back(MCFixup::Create(0, Expr, Kind)); + + // All of the information is in the fixup. 
+ return 0; +} + +// Thumb BL and BLX use a strange offset encoding where bits 22 and 21 are +// determined by negating them and XOR'ing them with bit 23. +static int32_t encodeThumbBLOffset(int32_t offset) { + offset >>= 1; + uint32_t S = (offset & 0x800000) >> 23; + uint32_t J1 = (offset & 0x400000) >> 22; + uint32_t J2 = (offset & 0x200000) >> 21; + J1 = (~J1 & 0x1); + J2 = (~J2 & 0x1); + J1 ^= S; + J2 ^= S; + + offset &= ~0x600000; + offset |= J1 << 22; + offset |= J2 << 21; + + return offset; +} + +/// getThumbBLTargetOpValue - Return encoding info for immediate branch target. +uint32_t ARMMCCodeEmitter:: +getThumbBLTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bl, + Fixups); + return encodeThumbBLOffset(MO.getImm()); +} + +/// getThumbBLXTargetOpValue - Return encoding info for Thumb immediate +/// BLX branch target. +uint32_t ARMMCCodeEmitter:: +getThumbBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_blx, + Fixups); + return encodeThumbBLOffset(MO.getImm()); +} + +/// getThumbBRTargetOpValue - Return encoding info for Thumb branch target. +uint32_t ARMMCCodeEmitter:: +getThumbBRTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_br, + Fixups); + return (MO.getImm() >> 1); +} + +/// getThumbBCCTargetOpValue - Return encoding info for Thumb branch target. 
+uint32_t ARMMCCodeEmitter:: +getThumbBCCTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_bcc, + Fixups); + return (MO.getImm() >> 1); +} + +/// getThumbCBTargetOpValue - Return encoding info for Thumb branch target. +uint32_t ARMMCCodeEmitter:: +getThumbCBTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cb, Fixups); + return (MO.getImm() >> 1); +} + +/// Return true if this branch has a non-always predication +static bool HasConditionalBranch(const MCInst &MI) { + int NumOp = MI.getNumOperands(); + if (NumOp >= 2) { + for (int i = 0; i < NumOp-1; ++i) { + const MCOperand &MCOp1 = MI.getOperand(i); + const MCOperand &MCOp2 = MI.getOperand(i + 1); + if (MCOp1.isImm() && MCOp2.isReg() && + (MCOp2.getReg() == 0 || MCOp2.getReg() == ARM::CPSR)) { + if (ARMCC::CondCodes(MCOp1.getImm()) != ARMCC::AL) + return true; + } + } + } + return false; +} + +/// getBranchTargetOpValue - Return encoding info for 24-bit immediate branch +/// target. +uint32_t ARMMCCodeEmitter:: +getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // FIXME: This really, really shouldn't use TargetMachine. We don't want + // coupling between MC and TM anywhere we can help it. + if (isThumb2()) + return + ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_condbranch, Fixups); + return getARMBranchTargetOpValue(MI, OpIdx, Fixups); +} + +/// getBranchTargetOpValue - Return encoding info for 24-bit immediate branch +/// target. 
+uint32_t ARMMCCodeEmitter:: +getARMBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) { + if (HasConditionalBranch(MI)) + return ::getBranchTargetOpValue(MI, OpIdx, + ARM::fixup_arm_condbranch, Fixups); + return ::getBranchTargetOpValue(MI, OpIdx, + ARM::fixup_arm_uncondbranch, Fixups); + } + + return MO.getImm() >> 2; +} + +uint32_t ARMMCCodeEmitter:: +getARMBLXTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) { + if (HasConditionalBranch(MI)) + return ::getBranchTargetOpValue(MI, OpIdx, + ARM::fixup_arm_condbranch, Fixups); + return ::getBranchTargetOpValue(MI, OpIdx, + ARM::fixup_arm_uncondbranch, Fixups); + } + + return MO.getImm() >> 1; +} + +/// getUnconditionalBranchTargetOpValue - Return encoding info for 24-bit +/// immediate branch target. +uint32_t ARMMCCodeEmitter:: +getUnconditionalBranchTargetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + unsigned Val = + ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_uncondbranch, Fixups); + bool I = (Val & 0x800000); + bool J1 = (Val & 0x400000); + bool J2 = (Val & 0x200000); + if (I ^ J1) + Val &= ~0x400000; + else + Val |= 0x400000; + + if (I ^ J2) + Val &= ~0x200000; + else + Val |= 0x200000; + + return Val; +} + +/// getAdrLabelOpValue - Return encoding info for 12-bit immediate ADR label +/// target. 
+uint32_t ARMMCCodeEmitter:: +getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_adr_pcrel_12, + Fixups); + int32_t offset = MO.getImm(); + uint32_t Val = 0x2000; + if (offset < 0) { + Val = 0x1000; + offset *= -1; + } + Val |= offset; + return Val; +} + +/// getAdrLabelOpValue - Return encoding info for 12-bit immediate ADR label +/// target. +uint32_t ARMMCCodeEmitter:: +getT2AdrLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_t2_adr_pcrel_12, + Fixups); + int32_t Val = MO.getImm(); + if (Val < 0) { + Val *= -1; + Val |= 0x1000; + } + return Val; +} + +/// getAdrLabelOpValue - Return encoding info for 8-bit immediate ADR label +/// target. +uint32_t ARMMCCodeEmitter:: +getThumbAdrLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_thumb_adr_pcrel_10, + Fixups); + return MO.getImm(); +} + +/// getThumbAddrModeRegRegOpValue - Return encoding info for 'reg + reg' +/// operand. +uint32_t ARMMCCodeEmitter:: +getThumbAddrModeRegRegOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &) const { + // [Rn, Rm] + // {5-3} = Rm + // {2-0} = Rn + const MCOperand &MO1 = MI.getOperand(OpIdx); + const MCOperand &MO2 = MI.getOperand(OpIdx + 1); + unsigned Rn = getARMRegisterNumbering(MO1.getReg()); + unsigned Rm = getARMRegisterNumbering(MO2.getReg()); + return (Rm << 3) | Rn; +} + +/// getAddrModeImm12OpValue - Return encoding info for 'reg +/- imm12' operand. 
+uint32_t ARMMCCodeEmitter:: +getAddrModeImm12OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {17-13} = reg + // {12} = (U)nsigned (add == '1', sub == '0') + // {11-0} = imm12 + unsigned Reg, Imm12; + bool isAdd = true; + // If The first operand isn't a register, we have a label reference. + const MCOperand &MO = MI.getOperand(OpIdx); + if (!MO.isReg()) { + Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC. + Imm12 = 0; + isAdd = false ; // 'U' bit is set as part of the fixup. + + if (MO.isExpr()) { + const MCExpr *Expr = MO.getExpr(); + + MCFixupKind Kind; + if (isThumb2()) + Kind = MCFixupKind(ARM::fixup_t2_ldst_pcrel_12); + else + Kind = MCFixupKind(ARM::fixup_arm_ldst_pcrel_12); + Fixups.push_back(MCFixup::Create(0, Expr, Kind)); + + ++MCNumCPRelocations; + } else { + Reg = ARM::PC; + int32_t Offset = MO.getImm(); + if (Offset < 0) { + Offset *= -1; + isAdd = false; + } + Imm12 = Offset; + } + } else + isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm12, Fixups); + + uint32_t Binary = Imm12 & 0xfff; + // Immediate is always encoded as positive. The 'U' bit controls add vs sub. + if (isAdd) + Binary |= (1 << 12); + Binary |= (Reg << 13); + return Binary; +} + +/// getT2Imm8s4OpValue - Return encoding info for +/// '+/- imm8<<2' operand. +uint32_t ARMMCCodeEmitter:: +getT2Imm8s4OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // FIXME: The immediate operand should have already been encoded like this + // before ever getting here. The encoder method should just need to combine + // the MI operands for the register and the offset into a single + // representation for the complex operand in the .td file. This isn't just + // style, unfortunately. As-is, we can't represent the distinct encoding + // for #-0. 
+ + // {8} = (U)nsigned (add == '1', sub == '0') + // {7-0} = imm8 + int32_t Imm8 = MI.getOperand(OpIdx).getImm(); + bool isAdd = Imm8 >= 0; + + // Immediate is always encoded as positive. The 'U' bit controls add vs sub. + if (Imm8 < 0) + Imm8 = -Imm8; + + // Scaled by 4. + Imm8 /= 4; + + uint32_t Binary = Imm8 & 0xff; + // Immediate is always encoded as positive. The 'U' bit controls add vs sub. + if (isAdd) + Binary |= (1 << 8); + return Binary; +} + +/// getT2AddrModeImm8s4OpValue - Return encoding info for +/// 'reg +/- imm8<<2' operand. +uint32_t ARMMCCodeEmitter:: +getT2AddrModeImm8s4OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {12-9} = reg + // {8} = (U)nsigned (add == '1', sub == '0') + // {7-0} = imm8 + unsigned Reg, Imm8; + bool isAdd = true; + // If The first operand isn't a register, we have a label reference. + const MCOperand &MO = MI.getOperand(OpIdx); + if (!MO.isReg()) { + Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC. + Imm8 = 0; + isAdd = false ; // 'U' bit is set as part of the fixup. + + assert(MO.isExpr() && "Unexpected machine operand type!"); + const MCExpr *Expr = MO.getExpr(); + MCFixupKind Kind = MCFixupKind(ARM::fixup_arm_pcrel_10); + Fixups.push_back(MCFixup::Create(0, Expr, Kind)); + + ++MCNumCPRelocations; + } else + isAdd = EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups); + + // FIXME: The immediate operand should have already been encoded like this + // before ever getting here. The encoder method should just need to combine + // the MI operands for the register and the offset into a single + // representation for the complex operand in the .td file. This isn't just + // style, unfortunately. As-is, we can't represent the distinct encoding + // for #-0. + uint32_t Binary = (Imm8 >> 2) & 0xff; + // Immediate is always encoded as positive. The 'U' bit controls add vs sub. 
+ if (isAdd) + Binary |= (1 << 8); + Binary |= (Reg << 9); + return Binary; +} + +/// getT2AddrModeImm0_1020s4OpValue - Return encoding info for +/// 'reg + imm8<<2' operand. +uint32_t ARMMCCodeEmitter:: +getT2AddrModeImm0_1020s4OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {11-8} = reg + // {7-0} = imm8 + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx + 1); + unsigned Reg = getARMRegisterNumbering(MO.getReg()); + unsigned Imm8 = MO1.getImm(); + return (Reg << 8) | Imm8; +} + +// FIXME: This routine assumes that a binary +// expression will always result in a PCRel expression +// In reality, its only true if one or more subexpressions +// is itself a PCRel (i.e. "." in asm or some other pcrel construct) +// but this is good enough for now. +static bool EvaluateAsPCRel(const MCExpr *Expr) { + switch (Expr->getKind()) { + default: assert(0 && "Unexpected expression type"); + case MCExpr::SymbolRef: return false; + case MCExpr::Binary: return true; + } +} + +uint32_t +ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {20-16} = imm{15-12} + // {11-0} = imm{11-0} + const MCOperand &MO = MI.getOperand(OpIdx); + if (MO.isImm()) + // Hi / lo 16 bits already extracted during earlier passes. + return static_cast<unsigned>(MO.getImm()); + + // Handle :upper16: and :lower16: assembly prefixes. + const MCExpr *E = MO.getExpr(); + if (E->getKind() == MCExpr::Target) { + const ARMMCExpr *ARM16Expr = cast<ARMMCExpr>(E); + E = ARM16Expr->getSubExpr(); + + MCFixupKind Kind; + switch (ARM16Expr->getKind()) { + default: assert(0 && "Unsupported ARMFixup"); + case ARMMCExpr::VK_ARM_HI16: + if (!isTargetDarwin() && EvaluateAsPCRel(E)) + Kind = MCFixupKind(isThumb2() + ? ARM::fixup_t2_movt_hi16_pcrel + : ARM::fixup_arm_movt_hi16_pcrel); + else + Kind = MCFixupKind(isThumb2() + ? 
ARM::fixup_t2_movt_hi16 + : ARM::fixup_arm_movt_hi16); + break; + case ARMMCExpr::VK_ARM_LO16: + if (!isTargetDarwin() && EvaluateAsPCRel(E)) + Kind = MCFixupKind(isThumb2() + ? ARM::fixup_t2_movw_lo16_pcrel + : ARM::fixup_arm_movw_lo16_pcrel); + else + Kind = MCFixupKind(isThumb2() + ? ARM::fixup_t2_movw_lo16 + : ARM::fixup_arm_movw_lo16); + break; + } + Fixups.push_back(MCFixup::Create(0, E, Kind)); + return 0; + }; + + llvm_unreachable("Unsupported MCExpr type in MCOperand!"); + return 0; +} + +uint32_t ARMMCCodeEmitter:: +getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx+1); + const MCOperand &MO2 = MI.getOperand(OpIdx+2); + unsigned Rn = getARMRegisterNumbering(MO.getReg()); + unsigned Rm = getARMRegisterNumbering(MO1.getReg()); + unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()); + bool isAdd = ARM_AM::getAM2Op(MO2.getImm()) == ARM_AM::add; + ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm()); + unsigned SBits = getShiftOp(ShOp); + + // {16-13} = Rn + // {12} = isAdd + // {11-0} = shifter + // {3-0} = Rm + // {4} = 0 + // {6-5} = type + // {11-7} = imm + uint32_t Binary = Rm; + Binary |= Rn << 13; + Binary |= SBits << 5; + Binary |= ShImm << 7; + if (isAdd) + Binary |= 1 << 12; + return Binary; +} + +uint32_t ARMMCCodeEmitter:: +getAddrMode2OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {17-14} Rn + // {13} 1 == imm12, 0 == Rm + // {12} isAdd + // {11-0} imm12/Rm + const MCOperand &MO = MI.getOperand(OpIdx); + unsigned Rn = getARMRegisterNumbering(MO.getReg()); + uint32_t Binary = getAddrMode2OffsetOpValue(MI, OpIdx + 1, Fixups); + Binary |= Rn << 14; + return Binary; +} + +uint32_t ARMMCCodeEmitter:: +getAddrMode2OffsetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {13} 1 == imm12, 0 == Rm + // {12} isAdd + // {11-0} 
imm12/Rm + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx+1); + unsigned Imm = MO1.getImm(); + bool isAdd = ARM_AM::getAM2Op(Imm) == ARM_AM::add; + bool isReg = MO.getReg() != 0; + uint32_t Binary = ARM_AM::getAM2Offset(Imm); + // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm12 + if (isReg) { + ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(Imm); + Binary <<= 7; // Shift amount is bits [11:7] + Binary |= getShiftOp(ShOp) << 5; // Shift type is bits [6:5] + Binary |= getARMRegisterNumbering(MO.getReg()); // Rm is bits [3:0] + } + return Binary | (isAdd << 12) | (isReg << 13); +} + +uint32_t ARMMCCodeEmitter:: +getPostIdxRegOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {4} isAdd + // {3-0} Rm + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx+1); + bool isAdd = MO1.getImm() != 0; + return getARMRegisterNumbering(MO.getReg()) | (isAdd << 4); +} + +uint32_t ARMMCCodeEmitter:: +getAddrMode3OffsetOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {9} 1 == imm8, 0 == Rm + // {8} isAdd + // {7-4} imm7_4/zero + // {3-0} imm3_0/Rm + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx+1); + unsigned Imm = MO1.getImm(); + bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add; + bool isImm = MO.getReg() == 0; + uint32_t Imm8 = ARM_AM::getAM3Offset(Imm); + // if reg +/- reg, Rm will be non-zero. 
Otherwise, we have reg +/- imm8 + if (!isImm) + Imm8 = getARMRegisterNumbering(MO.getReg()); + return Imm8 | (isAdd << 8) | (isImm << 9); +} + +uint32_t ARMMCCodeEmitter:: +getAddrMode3OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {13} 1 == imm8, 0 == Rm + // {12-9} Rn + // {8} isAdd + // {7-4} imm7_4/zero + // {3-0} imm3_0/Rm + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx+1); + const MCOperand &MO2 = MI.getOperand(OpIdx+2); + unsigned Rn = getARMRegisterNumbering(MO.getReg()); + unsigned Imm = MO2.getImm(); + bool isAdd = ARM_AM::getAM3Op(Imm) == ARM_AM::add; + bool isImm = MO1.getReg() == 0; + uint32_t Imm8 = ARM_AM::getAM3Offset(Imm); + // if reg +/- reg, Rm will be non-zero. Otherwise, we have reg +/- imm8 + if (!isImm) + Imm8 = getARMRegisterNumbering(MO1.getReg()); + return (Rn << 9) | Imm8 | (isAdd << 8) | (isImm << 13); +} + +/// getAddrModeThumbSPOpValue - Encode the t_addrmode_sp operands. +uint32_t ARMMCCodeEmitter:: +getAddrModeThumbSPOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // [SP, #imm] + // {7-0} = imm8 + const MCOperand &MO1 = MI.getOperand(OpIdx + 1); + assert(MI.getOperand(OpIdx).getReg() == ARM::SP && + "Unexpected base register!"); + + // The immediate is already shifted for the implicit zeroes, so no change + // here. + return MO1.getImm() & 0xff; +} + +/// getAddrModeISOpValue - Encode the t_addrmode_is# operands. +uint32_t ARMMCCodeEmitter:: +getAddrModeISOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // [Rn, #imm] + // {7-3} = imm5 + // {2-0} = Rn + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx + 1); + unsigned Rn = getARMRegisterNumbering(MO.getReg()); + unsigned Imm5 = MO1.getImm(); + return ((Imm5 & 0x1f) << 3) | Rn; +} + +/// getAddrModePCOpValue - Return encoding for t_addrmode_pc operands. 
+uint32_t ARMMCCodeEmitter:: +getAddrModePCOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand MO = MI.getOperand(OpIdx); + if (MO.isExpr()) + return ::getBranchTargetOpValue(MI, OpIdx, ARM::fixup_arm_thumb_cp, Fixups); + return (MO.getImm() >> 2); +} + +/// getAddrMode5OpValue - Return encoding info for 'reg +/- imm10' operand. +uint32_t ARMMCCodeEmitter:: +getAddrMode5OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // {12-9} = reg + // {8} = (U)nsigned (add == '1', sub == '0') + // {7-0} = imm8 + unsigned Reg, Imm8; + bool isAdd; + // If The first operand isn't a register, we have a label reference. + const MCOperand &MO = MI.getOperand(OpIdx); + if (!MO.isReg()) { + Reg = getARMRegisterNumbering(ARM::PC); // Rn is PC. + Imm8 = 0; + isAdd = false; // 'U' bit is handled as part of the fixup. + + assert(MO.isExpr() && "Unexpected machine operand type!"); + const MCExpr *Expr = MO.getExpr(); + MCFixupKind Kind; + if (isThumb2()) + Kind = MCFixupKind(ARM::fixup_t2_pcrel_10); + else + Kind = MCFixupKind(ARM::fixup_arm_pcrel_10); + Fixups.push_back(MCFixup::Create(0, Expr, Kind)); + + ++MCNumCPRelocations; + } else { + EncodeAddrModeOpValues(MI, OpIdx, Reg, Imm8, Fixups); + isAdd = ARM_AM::getAM5Op(Imm8) == ARM_AM::add; + } + + uint32_t Binary = ARM_AM::getAM5Offset(Imm8); + // Immediate is always encoded as positive. The 'U' bit controls add vs sub. + if (isAdd) + Binary |= (1 << 8); + Binary |= (Reg << 9); + return Binary; +} + +unsigned ARMMCCodeEmitter:: +getSORegRegOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // Sub-operands are [reg, reg, imm]. The first register is Rm, the reg to be + // shifted. The second is Rs, the amount to shift by, and the third specifies + // the type of the shift. + // + // {3-0} = Rm. 
+ // {4} = 1 + // {6-5} = type + // {11-8} = Rs + // {7} = 0 + + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx + 1); + const MCOperand &MO2 = MI.getOperand(OpIdx + 2); + ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO2.getImm()); + + // Encode Rm. + unsigned Binary = getARMRegisterNumbering(MO.getReg()); + + // Encode the shift opcode. + unsigned SBits = 0; + unsigned Rs = MO1.getReg(); + if (Rs) { + // Set shift operand (bit[7:4]). + // LSL - 0001 + // LSR - 0011 + // ASR - 0101 + // ROR - 0111 + switch (SOpc) { + default: llvm_unreachable("Unknown shift opc!"); + case ARM_AM::lsl: SBits = 0x1; break; + case ARM_AM::lsr: SBits = 0x3; break; + case ARM_AM::asr: SBits = 0x5; break; + case ARM_AM::ror: SBits = 0x7; break; + } + } + + Binary |= SBits << 4; + + // Encode the shift operation Rs. + // Encode Rs bit[11:8]. + assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0); + return Binary | (getARMRegisterNumbering(Rs) << ARMII::RegRsShift); +} + +unsigned ARMMCCodeEmitter:: +getSORegImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + // Sub-operands are [reg, imm]. The first register is Rm, the reg to be + // shifted. The second is the amount to shift by. + // + // {3-0} = Rm. + // {4} = 0 + // {6-5} = type + // {11-7} = imm + + const MCOperand &MO = MI.getOperand(OpIdx); + const MCOperand &MO1 = MI.getOperand(OpIdx + 1); + ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm()); + + // Encode Rm. + unsigned Binary = getARMRegisterNumbering(MO.getReg()); + + // Encode the shift opcode. + unsigned SBits = 0; + + // Set shift operand (bit[6:4]). + // LSL - 000 + // LSR - 010 + // ASR - 100 + // ROR - 110 + // RRX - 110 and bit[11:8] clear. 
+ switch (SOpc) { + default: llvm_unreachable("Unknown shift opc!"); + case ARM_AM::lsl: SBits = 0x0; break; + case ARM_AM::lsr: SBits = 0x2; break; + case ARM_AM::asr: SBits = 0x4; break; + case ARM_AM::ror: SBits = 0x6; break; + case ARM_AM::rrx: + Binary |= 0x60; + return Binary; + } + + // Encode shift_imm bit[11:7]. + Binary |= SBits << 4; + unsigned Offset = ARM_AM::getSORegOffset(MO1.getImm()); + assert(Offset && "Offset must be in range 1-32!"); + if (Offset == 32) Offset = 0; + return Binary | (Offset << 7); +} + + +unsigned ARMMCCodeEmitter:: +getT2AddrModeSORegOpValue(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO1 = MI.getOperand(OpNum); + const MCOperand &MO2 = MI.getOperand(OpNum+1); + const MCOperand &MO3 = MI.getOperand(OpNum+2); + + // Encoded as [Rn, Rm, imm]. + // FIXME: Needs fixup support. + unsigned Value = getARMRegisterNumbering(MO1.getReg()); + Value <<= 4; + Value |= getARMRegisterNumbering(MO2.getReg()); + Value <<= 2; + Value |= MO3.getImm(); + + return Value; +} + +unsigned ARMMCCodeEmitter:: +getT2AddrModeImm8OpValue(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO1 = MI.getOperand(OpNum); + const MCOperand &MO2 = MI.getOperand(OpNum+1); + + // FIXME: Needs fixup support. + unsigned Value = getARMRegisterNumbering(MO1.getReg()); + + // Even though the immediate is 8 bits long, we need 9 bits in order + // to represent the (inverse of the) sign bit. + Value <<= 9; + int32_t tmp = (int32_t)MO2.getImm(); + if (tmp < 0) + tmp = abs(tmp); + else + Value |= 256; // Set the ADD bit + Value |= tmp & 255; + return Value; +} + +unsigned ARMMCCodeEmitter:: +getT2AddrModeImm8OffsetOpValue(const MCInst &MI, unsigned OpNum, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO1 = MI.getOperand(OpNum); + + // FIXME: Needs fixup support. 
+  unsigned Value = 0;
+  int32_t tmp = (int32_t)MO1.getImm();
+  if (tmp < 0)
+    tmp = abs(tmp);
+  else
+    Value |= 256; // Set the ADD bit
+  Value |= tmp & 255;
+  return Value;
+}
+
+unsigned ARMMCCodeEmitter::
+getT2AddrModeImm12OffsetOpValue(const MCInst &MI, unsigned OpNum,
+                                SmallVectorImpl<MCFixup> &Fixups) const {
+  const MCOperand &MO1 = MI.getOperand(OpNum);
+
+  // FIXME: Needs fixup support.
+  unsigned Value = 0;
+  int32_t tmp = (int32_t)MO1.getImm();
+  if (tmp < 0)
+    tmp = abs(tmp);
+  else
+    Value |= 4096; // Set the ADD bit
+  Value |= tmp & 4095;
+  return Value;
+}
+
+unsigned ARMMCCodeEmitter::
+getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
+                  SmallVectorImpl<MCFixup> &Fixups) const {
+  // Sub-operands are [reg, imm]. The first register is Rm, the reg to be
+  // shifted. The second is the amount to shift by.
+  //
+  // {3-0} = Rm.
+  // {4} = 0
+  // {6-5} = type
+  // {11-7} = imm
+
+  const MCOperand &MO = MI.getOperand(OpIdx);
+  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
+  ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO1.getImm());
+
+  // Encode Rm.
+  unsigned Binary = getARMRegisterNumbering(MO.getReg());
+
+  // Encode the shift opcode.
+  unsigned SBits = 0;
+  // Set shift operand (bit[6:4]).
+  // LSL - 000
+  // LSR - 010
+  // ASR - 100
+  // ROR - 110
+  switch (SOpc) {
+  default: llvm_unreachable("Unknown shift opc!");
+  case ARM_AM::lsl: SBits = 0x0; break;
+  case ARM_AM::lsr: SBits = 0x2; break;
+  case ARM_AM::asr: SBits = 0x4; break;
+  case ARM_AM::rrx: // FALLTHROUGH
+  case ARM_AM::ror: SBits = 0x6; break;
+  }
+
+  Binary |= SBits << 4;
+  if (SOpc == ARM_AM::rrx)
+    return Binary;
+
+  // Encode shift_imm bit[11:7].
+  return Binary | ARM_AM::getSORegOffset(MO1.getImm()) << 7;
+}
+
+unsigned ARMMCCodeEmitter::
+getBitfieldInvertedMaskOpValue(const MCInst &MI, unsigned Op,
+                               SmallVectorImpl<MCFixup> &Fixups) const {
+  // 10 bits. lower 5 bits are the lsb of the mask, high five bits are the
+  // msb of the mask.
+ const MCOperand &MO = MI.getOperand(Op); + uint32_t v = ~MO.getImm(); + uint32_t lsb = CountTrailingZeros_32(v); + uint32_t msb = (32 - CountLeadingZeros_32 (v)) - 1; + assert (v != 0 && lsb < 32 && msb < 32 && "Illegal bitfield mask!"); + return lsb | (msb << 5); +} + +unsigned ARMMCCodeEmitter:: +getRegisterListOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + // VLDM/VSTM: + // {12-8} = Vd + // {7-0} = Number of registers + // + // LDM/STM: + // {15-0} = Bitfield of GPRs. + unsigned Reg = MI.getOperand(Op).getReg(); + bool SPRRegs = llvm::ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg); + bool DPRRegs = llvm::ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg); + + unsigned Binary = 0; + + if (SPRRegs || DPRRegs) { + // VLDM/VSTM + unsigned RegNo = getARMRegisterNumbering(Reg); + unsigned NumRegs = (MI.getNumOperands() - Op) & 0xff; + Binary |= (RegNo & 0x1f) << 8; + if (SPRRegs) + Binary |= NumRegs; + else + Binary |= NumRegs * 2; + } else { + for (unsigned I = Op, E = MI.getNumOperands(); I < E; ++I) { + unsigned RegNo = getARMRegisterNumbering(MI.getOperand(I).getReg()); + Binary |= 1 << RegNo; + } + } + + return Binary; +} + +/// getAddrMode6AddressOpValue - Encode an addrmode6 register number along +/// with the alignment operand. +unsigned ARMMCCodeEmitter:: +getAddrMode6AddressOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &Reg = MI.getOperand(Op); + const MCOperand &Imm = MI.getOperand(Op + 1); + + unsigned RegNo = getARMRegisterNumbering(Reg.getReg()); + unsigned Align = 0; + + switch (Imm.getImm()) { + default: break; + case 2: + case 4: + case 8: Align = 0x01; break; + case 16: Align = 0x02; break; + case 32: Align = 0x03; break; + } + + return RegNo | (Align << 4); +} + +/// getAddrMode6OneLane32AddressOpValue - Encode an addrmode6 register number +/// along with the alignment operand for use in VST1 and VLD1 with size 32. 
+unsigned ARMMCCodeEmitter:: +getAddrMode6OneLane32AddressOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &Reg = MI.getOperand(Op); + const MCOperand &Imm = MI.getOperand(Op + 1); + + unsigned RegNo = getARMRegisterNumbering(Reg.getReg()); + unsigned Align = 0; + + switch (Imm.getImm()) { + default: break; + case 2: + case 4: + case 8: + case 16: Align = 0x00; break; + case 32: Align = 0x03; break; + } + + return RegNo | (Align << 4); +} + + +/// getAddrMode6DupAddressOpValue - Encode an addrmode6 register number and +/// alignment operand for use in VLD-dup instructions. This is the same as +/// getAddrMode6AddressOpValue except for the alignment encoding, which is +/// different for VLD4-dup. +unsigned ARMMCCodeEmitter:: +getAddrMode6DupAddressOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &Reg = MI.getOperand(Op); + const MCOperand &Imm = MI.getOperand(Op + 1); + + unsigned RegNo = getARMRegisterNumbering(Reg.getReg()); + unsigned Align = 0; + + switch (Imm.getImm()) { + default: break; + case 2: + case 4: + case 8: Align = 0x01; break; + case 16: Align = 0x03; break; + } + + return RegNo | (Align << 4); +} + +unsigned ARMMCCodeEmitter:: +getAddrMode6OffsetOpValue(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO = MI.getOperand(Op); + if (MO.getReg() == 0) return 0x0D; + return MO.getReg(); +} + +unsigned ARMMCCodeEmitter:: +getShiftRight8Imm(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + return 8 - MI.getOperand(Op).getImm(); +} + +unsigned ARMMCCodeEmitter:: +getShiftRight16Imm(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + return 16 - MI.getOperand(Op).getImm(); +} + +unsigned ARMMCCodeEmitter:: +getShiftRight32Imm(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + return 32 - MI.getOperand(Op).getImm(); +} + +unsigned 
ARMMCCodeEmitter:: +getShiftRight64Imm(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const { + return 64 - MI.getOperand(Op).getImm(); +} + +void ARMMCCodeEmitter:: +EncodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups) const { + // Pseudo instructions don't get encoded. + const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); + uint64_t TSFlags = Desc.TSFlags; + if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo) + return; + + int Size; + if (Desc.getSize() == 2 || Desc.getSize() == 4) + Size = Desc.getSize(); + else + llvm_unreachable("Unexpected instruction size!"); + + uint32_t Binary = getBinaryCodeForInstr(MI, Fixups); + // Thumb 32-bit wide instructions need to emit the high order halfword + // first. + if (isThumb() && Size == 4) { + EmitConstant(Binary >> 16, 2, OS); + EmitConstant(Binary & 0xffff, 2, OS); + } else + EmitConstant(Binary, Size, OS); + ++MCNumEmitted; // Keep track of the # of mi's emitted. +} + +#include "ARMGenMCCodeEmitter.inc" diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp new file mode 100644 index 0000000..2727ba8 --- /dev/null +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.cpp @@ -0,0 +1,73 @@ +//===-- ARMMCExpr.cpp - ARM specific MC expression classes ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "armmcexpr" +#include "ARMMCExpr.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCAssembler.h" +using namespace llvm; + +const ARMMCExpr* +ARMMCExpr::Create(VariantKind Kind, const MCExpr *Expr, + MCContext &Ctx) { + return new (Ctx) ARMMCExpr(Kind, Expr); +} + +void ARMMCExpr::PrintImpl(raw_ostream &OS) const { + switch (Kind) { + default: assert(0 && "Invalid kind!"); + case VK_ARM_HI16: OS << ":upper16:"; break; + case VK_ARM_LO16: OS << ":lower16:"; break; + } + + const MCExpr *Expr = getSubExpr(); + if (Expr->getKind() != MCExpr::SymbolRef) + OS << '('; + Expr->print(OS); + if (Expr->getKind() != MCExpr::SymbolRef) + OS << ')'; +} + +bool +ARMMCExpr::EvaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout) const { + return false; +} + +// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps +// that method should be made public? +static void AddValueSymbols_(const MCExpr *Value, MCAssembler *Asm) { + switch (Value->getKind()) { + case MCExpr::Target: + assert(0 && "Can't handle nested target expr!"); + break; + + case MCExpr::Constant: + break; + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value); + AddValueSymbols_(BE->getLHS(), Asm); + AddValueSymbols_(BE->getRHS(), Asm); + break; + } + + case MCExpr::SymbolRef: + Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol()); + break; + + case MCExpr::Unary: + AddValueSymbols_(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm); + break; + } +} + +void ARMMCExpr::AddValueSymbols(MCAssembler *Asm) const { + AddValueSymbols_(getSubExpr(), Asm); +} diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h new file mode 100644 index 0000000..0a2e883 --- /dev/null +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h @@ -0,0 +1,76 @@ +//===-- ARMMCExpr.h - ARM specific MC 
expression classes ------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef ARMMCEXPR_H +#define ARMMCEXPR_H + +#include "llvm/MC/MCExpr.h" + +namespace llvm { + +class ARMMCExpr : public MCTargetExpr { +public: + enum VariantKind { + VK_ARM_None, + VK_ARM_HI16, // The R_ARM_MOVT_ABS relocation (:upper16: in the .s file) + VK_ARM_LO16 // The R_ARM_MOVW_ABS_NC relocation (:lower16: in the .s file) + }; + +private: + const VariantKind Kind; + const MCExpr *Expr; + + explicit ARMMCExpr(VariantKind _Kind, const MCExpr *_Expr) + : Kind(_Kind), Expr(_Expr) {} + +public: + /// @name Construction + /// @{ + + static const ARMMCExpr *Create(VariantKind Kind, const MCExpr *Expr, + MCContext &Ctx); + + static const ARMMCExpr *CreateUpper16(const MCExpr *Expr, MCContext &Ctx) { + return Create(VK_ARM_HI16, Expr, Ctx); + } + + static const ARMMCExpr *CreateLower16(const MCExpr *Expr, MCContext &Ctx) { + return Create(VK_ARM_LO16, Expr, Ctx); + } + + /// @} + /// @name Accessors + /// @{ + + /// getOpcode - Get the kind of this expression. + VariantKind getKind() const { return Kind; } + + /// getSubExpr - Get the child of this expression. 
+ const MCExpr *getSubExpr() const { return Expr; } + + /// @} + + void PrintImpl(raw_ostream &OS) const; + bool EvaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout) const; + void AddValueSymbols(MCAssembler *) const; + const MCSection *FindAssociatedSection() const { + return getSubExpr()->FindAssociatedSection(); + } + + static bool classof(const MCExpr *E) { + return E->getKind() == MCExpr::Target; + } + + static bool classof(const ARMMCExpr *) { return true; } + +}; +} // end namespace llvm + +#endif diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp index f8fcf2b..a55c410 100644 --- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp @@ -13,10 +13,16 @@ #include "ARMMCTargetDesc.h" #include "ARMMCAsmInfo.h" +#include "ARMBaseInfo.h" +#include "InstPrinter/ARMInstPrinter.h" +#include "llvm/MC/MCCodeGenInfo.h" +#include "llvm/MC/MCInstrAnalysis.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSubtargetInfo.h" -#include "llvm/Target/TargetRegistry.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" #define GET_REGINFO_MC_DESC #include "ARMGenRegisterInfo.inc" @@ -35,7 +41,7 @@ std::string ARM_MC::ParseARMTriple(StringRef TT) { unsigned Len = TT.size(); unsigned Idx = 0; - // FIXME: Enahnce Triple helper class to extract ARM version. + // FIXME: Enhance Triple helper class to extract ARM version. 
bool isThumb = false; if (Len >= 5 && TT.substr(0, 4) == "armv") Idx = 4; @@ -50,18 +56,21 @@ std::string ARM_MC::ParseARMTriple(StringRef TT) { unsigned SubVer = TT[Idx]; if (SubVer >= '7' && SubVer <= '9') { if (Len >= Idx+2 && TT[Idx+1] == 'm') { - // v7m: FeatureNoARM, FeatureDB, FeatureHWDiv - ARMArchFeature = "+v7,+noarm,+db,+hwdiv"; + // v7m: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureMClass + ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+mclass"; } else if (Len >= Idx+3 && TT[Idx+1] == 'e'&& TT[Idx+2] == 'm') { // v7em: FeatureNoARM, FeatureDB, FeatureHWDiv, FeatureDSPThumb2, - // FeatureT2XtPk - ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+t2dsp,t2xtpk"; + // FeatureT2XtPk, FeatureMClass + ARMArchFeature = "+v7,+noarm,+db,+hwdiv,+t2dsp,t2xtpk,+mclass"; } else - // v7a: FeatureNEON, FeatureDB, FeatureDSPThumb2 - ARMArchFeature = "+v7,+neon,+db,+t2dsp"; + // v7a: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk + ARMArchFeature = "+v7,+neon,+db,+t2dsp,+t2xtpk"; } else if (SubVer == '6') { if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2') ARMArchFeature = "+v6t2"; + else if (Len >= Idx+2 && TT[Idx+1] == 'm') + // v6m: FeatureNoARM, FeatureMClass + ARMArchFeature = "+v6t2,+noarm,+mclass"; else ARMArchFeature = "+v6"; } else if (SubVer == '5') { @@ -80,6 +89,14 @@ std::string ARM_MC::ParseARMTriple(StringRef TT) { ARMArchFeature += ",+thumb-mode"; } + Triple TheTriple(TT); + if (TheTriple.getOS() == Triple::NativeClient) { + if (ARMArchFeature.empty()) + ARMArchFeature = "+nacl-mode"; + else + ARMArchFeature += ",+nacl-mode"; + } + return ARMArchFeature; } @@ -98,36 +115,18 @@ MCSubtargetInfo *ARM_MC::createARMMCSubtargetInfo(StringRef TT, StringRef CPU, return X; } -// Force static initialization. 
-extern "C" void LLVMInitializeARMMCSubtargetInfo() { - TargetRegistry::RegisterMCSubtargetInfo(TheARMTarget, - ARM_MC::createARMMCSubtargetInfo); - TargetRegistry::RegisterMCSubtargetInfo(TheThumbTarget, - ARM_MC::createARMMCSubtargetInfo); -} - static MCInstrInfo *createARMMCInstrInfo() { MCInstrInfo *X = new MCInstrInfo(); InitARMMCInstrInfo(X); return X; } -extern "C" void LLVMInitializeARMMCInstrInfo() { - TargetRegistry::RegisterMCInstrInfo(TheARMTarget, createARMMCInstrInfo); - TargetRegistry::RegisterMCInstrInfo(TheThumbTarget, createARMMCInstrInfo); -} - -static MCRegisterInfo *createARMMCRegisterInfo() { +static MCRegisterInfo *createARMMCRegisterInfo(StringRef Triple) { MCRegisterInfo *X = new MCRegisterInfo(); - InitARMMCRegisterInfo(X); + InitARMMCRegisterInfo(X, ARM::LR); return X; } -extern "C" void LLVMInitializeARMMCRegInfo() { - TargetRegistry::RegisterMCRegInfo(TheARMTarget, createARMMCRegisterInfo); - TargetRegistry::RegisterMCRegInfo(TheThumbTarget, createARMMCRegisterInfo); -} - static MCAsmInfo *createARMMCAsmInfo(const Target &T, StringRef TT) { Triple TheTriple(TT); @@ -137,8 +136,128 @@ static MCAsmInfo *createARMMCAsmInfo(const Target &T, StringRef TT) { return new ARMELFMCAsmInfo(); } -extern "C" void LLVMInitializeARMMCAsmInfo() { - // Register the target asm info. +static MCCodeGenInfo *createARMMCCodeGenInfo(StringRef TT, Reloc::Model RM, + CodeModel::Model CM) { + MCCodeGenInfo *X = new MCCodeGenInfo(); + if (RM == Reloc::Default) { + Triple TheTriple(TT); + // Default relocation model on Darwin is PIC, not DynamicNoPIC. + RM = TheTriple.isOSDarwin() ? Reloc::PIC_ : Reloc::DynamicNoPIC; + } + X->InitMCCodeGenInfo(RM, CM); + return X; +} + +// This is duplicated code. Refactor this. 
+static MCStreamer *createMCStreamer(const Target &T, StringRef TT, + MCContext &Ctx, MCAsmBackend &MAB, + raw_ostream &OS, + MCCodeEmitter *Emitter, + bool RelaxAll, + bool NoExecStack) { + Triple TheTriple(TT); + + if (TheTriple.isOSDarwin()) + return createMachOStreamer(Ctx, MAB, OS, Emitter, RelaxAll); + + if (TheTriple.isOSWindows()) { + llvm_unreachable("ARM does not support Windows COFF format"); + return NULL; + } + + return createELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack); +} + +static MCInstPrinter *createARMMCInstPrinter(const Target &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCSubtargetInfo &STI) { + if (SyntaxVariant == 0) + return new ARMInstPrinter(MAI, STI); + return 0; +} + +namespace { + +class ARMMCInstrAnalysis : public MCInstrAnalysis { +public: + ARMMCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {} + + virtual bool isUnconditionalBranch(const MCInst &Inst) const { + // BCCs with the "always" predicate are unconditional branches. + if (Inst.getOpcode() == ARM::Bcc && Inst.getOperand(1).getImm()==ARMCC::AL) + return true; + return MCInstrAnalysis::isUnconditionalBranch(Inst); + } + + virtual bool isConditionalBranch(const MCInst &Inst) const { + // BCCs with the "always" predicate are unconditional branches. + if (Inst.getOpcode() == ARM::Bcc && Inst.getOperand(1).getImm()==ARMCC::AL) + return false; + return MCInstrAnalysis::isConditionalBranch(Inst); + } + + uint64_t evaluateBranch(const MCInst &Inst, uint64_t Addr, + uint64_t Size) const { + // We only handle PCRel branches for now. + if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType!=MCOI::OPERAND_PCREL) + return -1ULL; + + int64_t Imm = Inst.getOperand(0).getImm(); + // FIXME: This is not right for thumb. + return Addr+Imm+8; // In ARM mode the PC is always off by 8 bytes. 
+ } +}; + +} + +static MCInstrAnalysis *createARMMCInstrAnalysis(const MCInstrInfo *Info) { + return new ARMMCInstrAnalysis(Info); +} + +// Force static initialization. +extern "C" void LLVMInitializeARMTargetMC() { + // Register the MC asm info. RegisterMCAsmInfoFn A(TheARMTarget, createARMMCAsmInfo); RegisterMCAsmInfoFn B(TheThumbTarget, createARMMCAsmInfo); + + // Register the MC codegen info. + TargetRegistry::RegisterMCCodeGenInfo(TheARMTarget, createARMMCCodeGenInfo); + TargetRegistry::RegisterMCCodeGenInfo(TheThumbTarget, createARMMCCodeGenInfo); + + // Register the MC instruction info. + TargetRegistry::RegisterMCInstrInfo(TheARMTarget, createARMMCInstrInfo); + TargetRegistry::RegisterMCInstrInfo(TheThumbTarget, createARMMCInstrInfo); + + // Register the MC register info. + TargetRegistry::RegisterMCRegInfo(TheARMTarget, createARMMCRegisterInfo); + TargetRegistry::RegisterMCRegInfo(TheThumbTarget, createARMMCRegisterInfo); + + // Register the MC subtarget info. + TargetRegistry::RegisterMCSubtargetInfo(TheARMTarget, + ARM_MC::createARMMCSubtargetInfo); + TargetRegistry::RegisterMCSubtargetInfo(TheThumbTarget, + ARM_MC::createARMMCSubtargetInfo); + + // Register the MC instruction analyzer. + TargetRegistry::RegisterMCInstrAnalysis(TheARMTarget, + createARMMCInstrAnalysis); + TargetRegistry::RegisterMCInstrAnalysis(TheThumbTarget, + createARMMCInstrAnalysis); + + // Register the MC Code Emitter + TargetRegistry::RegisterMCCodeEmitter(TheARMTarget, createARMMCCodeEmitter); + TargetRegistry::RegisterMCCodeEmitter(TheThumbTarget, createARMMCCodeEmitter); + + // Register the asm backend. + TargetRegistry::RegisterMCAsmBackend(TheARMTarget, createARMAsmBackend); + TargetRegistry::RegisterMCAsmBackend(TheThumbTarget, createARMAsmBackend); + + // Register the object streamer. + TargetRegistry::RegisterMCObjectStreamer(TheARMTarget, createMCStreamer); + TargetRegistry::RegisterMCObjectStreamer(TheThumbTarget, createMCStreamer); + + // Register the MCInstPrinter. 
+ TargetRegistry::RegisterMCInstPrinter(TheARMTarget, createARMMCInstPrinter); + TargetRegistry::RegisterMCInstPrinter(TheThumbTarget, createARMMCInstPrinter); } diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h index 74701e3..9b3d3bd 100644 --- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h @@ -14,12 +14,19 @@ #ifndef ARMMCTARGETDESC_H #define ARMMCTARGETDESC_H +#include "llvm/Support/DataTypes.h" #include <string> namespace llvm { +class MCAsmBackend; +class MCCodeEmitter; +class MCContext; +class MCInstrInfo; +class MCObjectWriter; class MCSubtargetInfo; -class Target; class StringRef; +class Target; +class raw_ostream; extern Target TheARMTarget, TheThumbTarget; @@ -33,6 +40,18 @@ namespace ARM_MC { StringRef FS); } +MCCodeEmitter *createARMMCCodeEmitter(const MCInstrInfo &MCII, + const MCSubtargetInfo &STI, + MCContext &Ctx); + +MCAsmBackend *createARMAsmBackend(const Target &T, StringRef TT); + +/// createARMMachObjectWriter - Construct an ARM Mach-O object writer. +MCObjectWriter *createARMMachObjectWriter(raw_ostream &OS, + bool Is64Bit, + uint32_t CPUType, + uint32_t CPUSubtype); + } // End llvm namespace // Defines symbolic names for ARM registers. This defines a mapping from diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp new file mode 100644 index 0000000..352c73e --- /dev/null +++ b/contrib/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp @@ -0,0 +1,388 @@ +//===-- ARMMachObjectWriter.cpp - ARM Mach Object Writer ------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/ARMBaseInfo.h" +#include "MCTargetDesc/ARMFixupKinds.h" +#include "llvm/ADT/Twine.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCAsmLayout.h" +#include "llvm/MC/MCMachObjectWriter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCFixup.h" +#include "llvm/MC/MCFixupKindInfo.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Object/MachOFormat.h" +#include "llvm/Support/ErrorHandling.h" +using namespace llvm; +using namespace llvm::object; + +namespace { +class ARMMachObjectWriter : public MCMachObjectTargetWriter { + void RecordARMScatteredRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, + MCValue Target, + unsigned Log2Size, + uint64_t &FixedValue); + void RecordARMMovwMovtRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, MCValue Target, + uint64_t &FixedValue); + +public: + ARMMachObjectWriter(bool Is64Bit, uint32_t CPUType, + uint32_t CPUSubtype) + : MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype, + /*UseAggressiveSymbolFolding=*/true) {} + + void RecordRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, const MCAsmLayout &Layout, + const MCFragment *Fragment, const MCFixup &Fixup, + MCValue Target, uint64_t &FixedValue); +}; +} + +static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType, + unsigned &Log2Size) { + RelocType = unsigned(macho::RIT_Vanilla); + Log2Size = ~0U; + + switch (Kind) { + default: + return false; + + case FK_Data_1: + Log2Size = llvm::Log2_32(1); + return true; + case FK_Data_2: + Log2Size = llvm::Log2_32(2); + return true; + case FK_Data_4: + Log2Size = llvm::Log2_32(4); + return true; + case FK_Data_8: + Log2Size = llvm::Log2_32(8); + return true; + + // Handle 24-bit branch kinds. 
+ case ARM::fixup_arm_ldst_pcrel_12: + case ARM::fixup_arm_pcrel_10: + case ARM::fixup_arm_adr_pcrel_12: + case ARM::fixup_arm_condbranch: + case ARM::fixup_arm_uncondbranch: + RelocType = unsigned(macho::RIT_ARM_Branch24Bit); + // Report as 'long', even though that is not quite accurate. + Log2Size = llvm::Log2_32(4); + return true; + + // Handle Thumb branches. + case ARM::fixup_arm_thumb_br: + RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit); + Log2Size = llvm::Log2_32(2); + return true; + + case ARM::fixup_t2_uncondbranch: + case ARM::fixup_arm_thumb_bl: + case ARM::fixup_arm_thumb_blx: + RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit); + Log2Size = llvm::Log2_32(4); + return true; + + case ARM::fixup_arm_movt_hi16: + case ARM::fixup_arm_movt_hi16_pcrel: + case ARM::fixup_t2_movt_hi16: + case ARM::fixup_t2_movt_hi16_pcrel: + RelocType = unsigned(macho::RIT_ARM_HalfDifference); + // Report as 'long', even though that is not quite accurate. + Log2Size = llvm::Log2_32(4); + return true; + + case ARM::fixup_arm_movw_lo16: + case ARM::fixup_arm_movw_lo16_pcrel: + case ARM::fixup_t2_movw_lo16: + case ARM::fixup_t2_movw_lo16_pcrel: + RelocType = unsigned(macho::RIT_ARM_Half); + // Report as 'long', even though that is not quite accurate. + Log2Size = llvm::Log2_32(4); + return true; + } +} + +void ARMMachObjectWriter:: +RecordARMMovwMovtRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, + MCValue Target, + uint64_t &FixedValue) { + uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset(); + unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind()); + unsigned Type = macho::RIT_ARM_Half; + + // See <reloc.h>. 
+  const MCSymbol *A = &Target.getSymA()->getSymbol();
+  MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+
+  if (!A_SD->getFragment())
+    report_fatal_error("symbol '" + A->getName() +
+                       "' can not be undefined in a subtraction expression");
+
+  uint32_t Value = Writer->getSymbolAddress(A_SD, Layout);
+  uint32_t Value2 = 0;
+  uint64_t SecAddr =
+    Writer->getSectionAddress(A_SD->getFragment()->getParent());
+  FixedValue += SecAddr;
+
+  if (const MCSymbolRefExpr *B = Target.getSymB()) {
+    MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+
+    if (!B_SD->getFragment())
+      report_fatal_error("symbol '" + B->getSymbol().getName() +
+                         "' can not be undefined in a subtraction expression");
+
+    // Select the appropriate difference relocation type.
+    Type = macho::RIT_ARM_HalfDifference;
+    Value2 = Writer->getSymbolAddress(B_SD, Layout);
+    FixedValue -= Writer->getSectionAddress(B_SD->getFragment()->getParent());
+  }
+
+  // Relocations are written out in reverse order, so the PAIR comes first.
+  // ARM_RELOC_HALF and ARM_RELOC_HALF_SECTDIFF abuse the r_length field:
+  //
+  // For these two r_type relocations they always have a pair following them and
+  // the r_length bits are used differently. The encoding of the r_length is as
+  // follows:
+  // low bit of r_length:
+  // 0 - :lower16: for movw instructions
+  // 1 - :upper16: for movt instructions
+  // high bit of r_length:
+  // 0 - arm instructions
+  // 1 - thumb instructions
+  // the other half of the relocated expression is in the following pair
+  // relocation entry in the low 16 bits of r_address field.
+ unsigned ThumbBit = 0; + unsigned MovtBit = 0; + switch ((unsigned)Fixup.getKind()) { + default: break; + case ARM::fixup_arm_movt_hi16: + case ARM::fixup_arm_movt_hi16_pcrel: + MovtBit = 1; + break; + case ARM::fixup_t2_movt_hi16: + case ARM::fixup_t2_movt_hi16_pcrel: + MovtBit = 1; + // Fallthrough + case ARM::fixup_t2_movw_lo16: + case ARM::fixup_t2_movw_lo16_pcrel: + ThumbBit = 1; + break; + } + + + if (Type == macho::RIT_ARM_HalfDifference) { + uint32_t OtherHalf = MovtBit + ? (FixedValue & 0xffff) : ((FixedValue & 0xffff0000) >> 16); + + macho::RelocationEntry MRE; + MRE.Word0 = ((OtherHalf << 0) | + (macho::RIT_Pair << 24) | + (MovtBit << 28) | + (ThumbBit << 29) | + (IsPCRel << 30) | + macho::RF_Scattered); + MRE.Word1 = Value2; + Writer->addRelocation(Fragment->getParent(), MRE); + } + + macho::RelocationEntry MRE; + MRE.Word0 = ((FixupOffset << 0) | + (Type << 24) | + (MovtBit << 28) | + (ThumbBit << 29) | + (IsPCRel << 30) | + macho::RF_Scattered); + MRE.Word1 = Value; + Writer->addRelocation(Fragment->getParent(), MRE); +} + +void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, + MCValue Target, + unsigned Log2Size, + uint64_t &FixedValue) { + uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset(); + unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind()); + unsigned Type = macho::RIT_Vanilla; + + // See <reloc.h>. 
+ const MCSymbol *A = &Target.getSymA()->getSymbol(); + MCSymbolData *A_SD = &Asm.getSymbolData(*A); + + if (!A_SD->getFragment()) + report_fatal_error("symbol '" + A->getName() + + "' can not be undefined in a subtraction expression"); + + uint32_t Value = Writer->getSymbolAddress(A_SD, Layout); + uint64_t SecAddr = Writer->getSectionAddress(A_SD->getFragment()->getParent()); + FixedValue += SecAddr; + uint32_t Value2 = 0; + + if (const MCSymbolRefExpr *B = Target.getSymB()) { + MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol()); + + if (!B_SD->getFragment()) + report_fatal_error("symbol '" + B->getSymbol().getName() + + "' can not be undefined in a subtraction expression"); + + // Select the appropriate difference relocation type. + Type = macho::RIT_Difference; + Value2 = Writer->getSymbolAddress(B_SD, Layout); + FixedValue -= Writer->getSectionAddress(B_SD->getFragment()->getParent()); + } + + // Relocations are written out in reverse order, so the PAIR comes first. + if (Type == macho::RIT_Difference || + Type == macho::RIT_Generic_LocalDifference) { + macho::RelocationEntry MRE; + MRE.Word0 = ((0 << 0) | + (macho::RIT_Pair << 24) | + (Log2Size << 28) | + (IsPCRel << 30) | + macho::RF_Scattered); + MRE.Word1 = Value2; + Writer->addRelocation(Fragment->getParent(), MRE); + } + + macho::RelocationEntry MRE; + MRE.Word0 = ((FixupOffset << 0) | + (Type << 24) | + (Log2Size << 28) | + (IsPCRel << 30) | + macho::RF_Scattered); + MRE.Word1 = Value; + Writer->addRelocation(Fragment->getParent(), MRE); +} + +void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFragment *Fragment, + const MCFixup &Fixup, + MCValue Target, + uint64_t &FixedValue) { + unsigned IsPCRel = Writer->isFixupKindPCRel(Asm, Fixup.getKind()); + unsigned Log2Size; + unsigned RelocType = macho::RIT_Vanilla; + if (!getARMFixupKindMachOInfo(Fixup.getKind(), RelocType, Log2Size)) { + report_fatal_error("unknown ARM 
fixup kind!"); + return; + } + + // If this is a difference or a defined symbol plus an offset, then we need a + // scattered relocation entry. Differences always require scattered + // relocations. + if (Target.getSymB()) { + if (RelocType == macho::RIT_ARM_Half || + RelocType == macho::RIT_ARM_HalfDifference) + return RecordARMMovwMovtRelocation(Writer, Asm, Layout, Fragment, Fixup, + Target, FixedValue); + return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, + Target, Log2Size, FixedValue); + } + + // Get the symbol data, if any. + MCSymbolData *SD = 0; + if (Target.getSymA()) + SD = &Asm.getSymbolData(Target.getSymA()->getSymbol()); + + // FIXME: For other platforms, we need to use scattered relocations for + // internal relocations with offsets. If this is an internal relocation with + // an offset, it also needs a scattered relocation entry. + // + // Is this right for ARM? + uint32_t Offset = Target.getConstant(); + if (IsPCRel && RelocType == macho::RIT_Vanilla) + Offset += 1 << Log2Size; + if (Offset && SD && !Writer->doesSymbolRequireExternRelocation(SD)) + return RecordARMScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, + Target, Log2Size, FixedValue); + + // See <reloc.h>. + uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset(); + unsigned Index = 0; + unsigned IsExtern = 0; + unsigned Type = 0; + + if (Target.isAbsolute()) { // constant + // FIXME! + report_fatal_error("FIXME: relocations to absolute targets " + "not yet implemented"); + } else { + // Resolve constant variables. + if (SD->getSymbol().isVariable()) { + int64_t Res; + if (SD->getSymbol().getVariableValue()->EvaluateAsAbsolute( + Res, Layout, Writer->getSectionAddressMap())) { + FixedValue = Res; + return; + } + } + + // Check whether we need an external or internal relocation. 
+ if (Writer->doesSymbolRequireExternRelocation(SD)) { + IsExtern = 1; + Index = SD->getIndex(); + + // For external relocations, make sure to offset the fixup value to + // compensate for the addend of the symbol address, if it was + // undefined. This occurs with weak definitions, for example. + if (!SD->Symbol->isUndefined()) + FixedValue -= Layout.getSymbolOffset(SD); + } else { + // The index is the section ordinal (1-based). + const MCSectionData &SymSD = Asm.getSectionData( + SD->getSymbol().getSection()); + Index = SymSD.getOrdinal() + 1; + FixedValue += Writer->getSectionAddress(&SymSD); + } + if (IsPCRel) + FixedValue -= Writer->getSectionAddress(Fragment->getParent()); + + // The type is determined by the fixup kind. + Type = RelocType; + } + + // struct relocation_info (8 bytes) + macho::RelocationEntry MRE; + MRE.Word0 = FixupOffset; + MRE.Word1 = ((Index << 0) | + (IsPCRel << 24) | + (Log2Size << 25) | + (IsExtern << 27) | + (Type << 28)); + Writer->addRelocation(Fragment->getParent(), MRE); +} + +MCObjectWriter *llvm::createARMMachObjectWriter(raw_ostream &OS, + bool Is64Bit, + uint32_t CPUType, + uint32_t CPUSubtype) { + return createMachObjectWriter(new ARMMachObjectWriter(Is64Bit, + CPUType, + CPUSubtype), + OS, /*IsLittleEndian=*/true); +} diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/CMakeLists.txt b/contrib/llvm/lib/Target/ARM/MCTargetDesc/CMakeLists.txt deleted file mode 100644 index 68daf42..0000000 --- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -add_llvm_library(LLVMARMDesc - ARMMCTargetDesc.cpp - ARMMCAsmInfo.cpp - ) - -# Hack: we need to include 'main' target directory to grab private headers -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/..) 
diff --git a/contrib/llvm/lib/Target/ARM/MCTargetDesc/Makefile b/contrib/llvm/lib/Target/ARM/MCTargetDesc/Makefile deleted file mode 100644 index 448ed9d..0000000 --- a/contrib/llvm/lib/Target/ARM/MCTargetDesc/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -##===- lib/Target/ARM/TargetDesc/Makefile ------------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -# -##===----------------------------------------------------------------------===## - -LEVEL = ../../../.. -LIBRARYNAME = LLVMARMDesc - -# Hack: we need to include 'main' target directory to grab private headers -CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. - -include $(LEVEL)/Makefile.common |