summaryrefslogtreecommitdiffstats
path: root/src/arch/arm
diff options
context:
space:
mode:
Diffstat (limited to 'src/arch/arm')
-rw-r--r--src/arch/arm/.gitattributes1
-rw-r--r--src/arch/arm/.gitignore15
-rw-r--r--src/arch/arm/Makefile.am27
-rw-r--r--src/arch/arm/arm-codegen.c193
-rw-r--r--src/arch/arm/arm-codegen.h1127
-rw-r--r--src/arch/arm/arm-dis.c509
-rw-r--r--src/arch/arm/arm-dis.h41
-rw-r--r--src/arch/arm/arm-vfp-codegen.h247
-rw-r--r--src/arch/arm/arm-wmmx.h177
-rw-r--r--src/arch/arm/cmp_macros.th56
-rw-r--r--src/arch/arm/dpi_macros.th112
-rwxr-xr-xsrc/arch/arm/dpiops.sh30
-rw-r--r--src/arch/arm/mov_macros.th121
-rw-r--r--src/arch/arm/tramp.c710
-rw-r--r--src/arch/arm/vfp_macros.th15
-rw-r--r--src/arch/arm/vfpm_macros.th14
-rwxr-xr-xsrc/arch/arm/vfpops.sh24
17 files changed, 3419 insertions, 0 deletions
diff --git a/src/arch/arm/.gitattributes b/src/arch/arm/.gitattributes
new file mode 100644
index 0000000..4819db1
--- /dev/null
+++ b/src/arch/arm/.gitattributes
@@ -0,0 +1 @@
+/arm-wmmx.h -crlf
diff --git a/src/arch/arm/.gitignore b/src/arch/arm/.gitignore
new file mode 100644
index 0000000..978145d
--- /dev/null
+++ b/src/arch/arm/.gitignore
@@ -0,0 +1,15 @@
+/Makefile
+/Makefile.in
+/.deps
+/.libs
+/*.o
+/*.la
+/*.lo
+/*.lib
+/*.obj
+/*.exe
+/*.dll
+/arm_dpimacros.h
+/arm_fpamacros.h
+/arm_vfpmacros.h
+/fixeol.sh
diff --git a/src/arch/arm/Makefile.am b/src/arch/arm/Makefile.am
new file mode 100644
index 0000000..593574c
--- /dev/null
+++ b/src/arch/arm/Makefile.am
@@ -0,0 +1,27 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-arm.la
+
+BUILT_SOURCES = arm_dpimacros.h arm_vfpmacros.h
+
+
+libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \
+ arm-codegen.c \
+ arm-codegen.h \
+ arm-dis.c \
+ arm-dis.h
+
+arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th
+ (cd $(srcdir); bash ./dpiops.sh) > $@t
+ mv $@t $@
+
+arm_vfpmacros.h: vfpops.sh vfpm_macros.th vfp_macros.th
+ (cd $(srcdir); bash ./vfpops.sh) > $@t
+ mv $@t $@
+
+CLEANFILES = $(BUILT_SOURCES)
+
+EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th \
+ vfpm_macros.th vfp_macros.th arm-vfp-codegen.h vfpops.sh
+
diff --git a/src/arch/arm/arm-codegen.c b/src/arch/arm/arm-codegen.c
new file mode 100644
index 0000000..9914ace
--- /dev/null
+++ b/src/arch/arm/arm-codegen.c
@@ -0,0 +1,193 @@
+/*
+ * arm-codegen.c
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+#include "arm-codegen.h"
+
+
/*
 * Emit a standard function prologue at p and return the advanced
 * instruction pointer.  Saves IP (a copy of the incoming SP), the four
 * argument registers A1-A4 and LR, then allocates local_size bytes of
 * stack for locals.
 * NOTE(review): in the large-frame path IP is reloaded from the stack
 * after the SUB -- presumably to restore the caller's IP value that the
 * second PUSH spilled; confirm against callers before changing.
 */
arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size) {
	/* keep a copy of the incoming SP in IP */
	ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);

	/* save args */
	ARM_PUSH(p, (1 << ARMREG_A1)
	          | (1 << ARMREG_A2)
	          | (1 << ARMREG_A3)
	          | (1 << ARMREG_A4));

	ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR));

	if (local_size != 0) {
		if ((local_size & (~0xFF)) == 0) {
			/* frame fits in an 8-bit immediate: single SUB */
			ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
		} else {
			/* TODO: optimize */
			/* load the frame size into IP, subtract it from SP */
			p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
			ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
			/* reload IP from the stack (offset local_size + 4) */
			ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
			ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
		}
	}

	return p;
}
+
/*
 * Emit a standard function epilogue at p and return the advanced
 * instruction pointer.  Releases local_size bytes of locals, then
 * restores SP and PC -- plus any registers requested in pop_regs
 * (masked to R0-R9) -- with a single no-write-back LDM.
 */
arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs) {
	if (local_size != 0) {
		if ((local_size & (~0xFF)) == 0) {
			/* frame fits in an 8-bit immediate: single ADD */
			ARM_ADD_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
		} else {
			/* TODO: optimize */
			p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
			ARM_ADD_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		}
	}

	/* ldmia sp, {...}: reload SP and PC; returning via PC. pop_regs
	 * is masked to the low ten registers (R0-R9). */
	ARM_POP_NWB(p, (1 << ARMREG_SP) | (1 << ARMREG_PC) | (pop_regs & 0x3FF));

	return p;
}
+
+
/* do not push A1-A4 */
/*
 * Emit a lean function prologue at p and return the advanced
 * instruction pointer.  Unlike arm_emit_std_prologue this does not
 * spill the argument registers; it saves IP (copy of incoming SP),
 * LR and the registers requested in push_regs (masked to R0-R9),
 * then allocates local_size bytes of locals.
 */
arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs) {
	ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
	/* push_regs up to R9 will be saved */
	ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR) | (push_regs & 0x3FF));

	if (local_size != 0) {
		if ((local_size & (~0xFF)) == 0) {
			/* frame fits in an 8-bit immediate: single SUB */
			ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
		} else {
			/* TODO: optimize */
			p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
			ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
			/* restore IP from stack */
			ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
			ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
		}
	}

	return p;
}
+
+/* Bit scan forward. */
+int arm_bsf(armword_t val) {
+ int i;
+ armword_t mask;
+
+ if (val == 0) return 0;
+ for (i=1, mask=1; (i <= 8 * sizeof(armword_t)) && ((val & mask) == 0); ++i, mask<<=1);
+
+ return i;
+}
+
+
+int arm_is_power_of_2(armword_t val) {
+ return ((val & (val-1)) == 0);
+}
+
+
+/*
+ * returns:
+ * 1 - unable to represent
+ * positive even number - MOV-representable
+ * negative even number - MVN-representable
+ */
+int calc_arm_mov_const_shift(armword_t val) {
+ armword_t mask;
+ int res = 1, shift;
+
+ for (shift=0; shift < 32; shift+=2) {
+ mask = ARM_SCALE(0xFF, shift);
+ if ((val & (~mask)) == 0) {
+ res = shift;
+ break;
+ }
+ if (((~val) & (~mask)) == 0) {
+ res = -shift - 2;
+ break;
+ }
+ }
+
+ return res;
+}
+
+
+int is_arm_const(armword_t val) {
+ int res;
+ res = arm_is_power_of_2(val);
+ if (!res) {
+ res = calc_arm_mov_const_shift(val);
+ res = !(res < 0 || res == 1);
+ }
+ return res;
+}
+
+
+int arm_const_steps(armword_t val) {
+ int shift, steps = 0;
+
+ while (val != 0) {
+ shift = (arm_bsf(val) - 1) & (~1);
+ val &= ~(0xFF << shift);
+ ++steps;
+ }
+ return steps;
+}
+
+
+/*
+ * ARM cannot load arbitrary 32-bit constants directly into registers;
+ * widely used work-around for this is to store constants into a
+ * PC-addressable pool and use LDR instruction with PC-relative address
+ * to load constant into register. Easiest way to implement this is to
+ * embed constant inside a function with unconditional branch around it.
+ * The above method is not used at the moment.
+ * This routine always emits sequence of instructions to generate
+ * requested constant. In the worst case it takes 4 instructions to
+ * synthesize a constant - 1 MOV and 3 subsequent ORRs.
+ */
+arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond) {
+ int mov_op;
+ int step_op;
+ int snip;
+ int shift = calc_arm_mov_const_shift(imm32);
+
+ if ((shift & 0x80000001) != 1) {
+ if (shift >= 0) {
+ ARM_MOV_REG_IMM_COND(p, reg, imm32 >> ((32 - shift) & 31), shift, cond);
+ } else {
+ ARM_MVN_REG_IMM_COND(p, reg, (imm32 ^ (~0)) >> ((32 + 2 + shift) & 31), (-shift - 2), cond);
+ }
+ } else {
+ mov_op = ARMOP_MOV;
+ step_op = ARMOP_ORR;
+
+ if (arm_const_steps(imm32) > arm_const_steps(~imm32)) {
+ mov_op = ARMOP_MVN;
+ step_op = ARMOP_SUB;
+ imm32 = ~imm32;
+ }
+
+ shift = (arm_bsf(imm32) - 1) & (~1);
+ snip = imm32 & (0xFF << shift);
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, 0, 0, mov_op, cond));
+
+ while ((imm32 ^= snip) != 0) {
+ shift = (arm_bsf(imm32) - 1) & (~1);
+ snip = imm32 & (0xFF << shift);
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, reg, 0, step_op, cond));
+ }
+ }
+
+ return p;
+}
+
+
/* Unconditional (AL) variant of arm_mov_reg_imm32_cond. */
arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32) {
	return arm_mov_reg_imm32_cond(p, reg, imm32, ARMCOND_AL);
}
+
+
+
diff --git a/src/arch/arm/arm-codegen.h b/src/arch/arm/arm-codegen.h
new file mode 100644
index 0000000..d4d7f7c
--- /dev/null
+++ b/src/arch/arm/arm-codegen.h
@@ -0,0 +1,1127 @@
+/*
+ * arm-codegen.h
+ * Copyright (c) 2002-2003 Sergey Chaban <serge@wildwestsoftware.com>
+ * Copyright 2005-2011 Novell Inc
+ * Copyright 2011 Xamarin Inc
+ */
+
+
+#ifndef ARM_H
+#define ARM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned int arminstr_t;
+typedef unsigned int armword_t;
+
+/* Helper functions */
+arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size);
+arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs);
+arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs);
+int arm_is_power_of_2(armword_t val);
+int calc_arm_mov_const_shift(armword_t val);
+int is_arm_const(armword_t val);
+int arm_bsf(armword_t val);
+arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond);
+arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32);
+
+
+
+#if defined(_MSC_VER) || defined(__CC_NORCROFT)
+ void __inline _arm_emit(arminstr_t** p, arminstr_t i) {**p = i; (*p)++;}
+# define ARM_EMIT(p, i) _arm_emit((arminstr_t**)&p, (arminstr_t)(i))
+#else
+# define ARM_EMIT(p, i) do { arminstr_t *__ainstrp = (void*)(p); *__ainstrp = (arminstr_t)(i); (p) = (void*)(__ainstrp+1);} while (0)
+#endif
+
+#if defined(_MSC_VER) && !defined(ARM_NOIASM)
+# define ARM_IASM(_expr) __emit (_expr)
+#else
+# define ARM_IASM(_expr)
+#endif
+
/* even_scale = rot << 1 : rotate imm8 right by even_scale bits.
 * NOTE(review): even_scale must be in 2..30; a value of 0 makes the
 * left shift count (32 - even_scale) equal 32, which is undefined
 * behavior (calc_arm_mov_const_shift computes the shift-0 mask inline
 * for this reason). even_scale is parenthesized so expression
 * arguments expand correctly. */
#define ARM_SCALE(imm8, even_scale) ( ((imm8) >> (even_scale)) | ((imm8) << (32 - (even_scale))) )
+
+
+
+typedef enum {
+ ARMREG_R0 = 0,
+ ARMREG_R1,
+ ARMREG_R2,
+ ARMREG_R3,
+ ARMREG_R4,
+ ARMREG_R5,
+ ARMREG_R6,
+ ARMREG_R7,
+ ARMREG_R8,
+ ARMREG_R9,
+ ARMREG_R10,
+ ARMREG_R11,
+ ARMREG_R12,
+ ARMREG_R13,
+ ARMREG_R14,
+ ARMREG_R15,
+
+
+ /* aliases */
+ /* args */
+ ARMREG_A1 = ARMREG_R0,
+ ARMREG_A2 = ARMREG_R1,
+ ARMREG_A3 = ARMREG_R2,
+ ARMREG_A4 = ARMREG_R3,
+
+ /* local vars */
+ ARMREG_V1 = ARMREG_R4,
+ ARMREG_V2 = ARMREG_R5,
+ ARMREG_V3 = ARMREG_R6,
+ ARMREG_V4 = ARMREG_R7,
+ ARMREG_V5 = ARMREG_R8,
+ ARMREG_V6 = ARMREG_R9,
+ ARMREG_V7 = ARMREG_R10,
+
+ ARMREG_FP = ARMREG_R11,
+ ARMREG_IP = ARMREG_R12,
+ ARMREG_SP = ARMREG_R13,
+ ARMREG_LR = ARMREG_R14,
+ ARMREG_PC = ARMREG_R15,
+
+ /* co-processor */
+ ARMREG_CR0 = 0,
+ ARMREG_CR1,
+ ARMREG_CR2,
+ ARMREG_CR3,
+ ARMREG_CR4,
+ ARMREG_CR5,
+ ARMREG_CR6,
+ ARMREG_CR7,
+ ARMREG_CR8,
+ ARMREG_CR9,
+ ARMREG_CR10,
+ ARMREG_CR11,
+ ARMREG_CR12,
+ ARMREG_CR13,
+ ARMREG_CR14,
+ ARMREG_CR15,
+
+ /* XScale: acc0 on CP0 */
+ ARMREG_ACC0 = ARMREG_CR0,
+
+ ARMREG_MAX = ARMREG_R15
+} ARMReg;
+
+/* number of argument registers */
+#define ARM_NUM_ARG_REGS 4
+
+/* bitvector for all argument regs (A1-A4) */
+#define ARM_ALL_ARG_REGS \
+ (1 << ARMREG_A1) | (1 << ARMREG_A2) | (1 << ARMREG_A3) | (1 << ARMREG_A4)
+
+
+typedef enum {
+ ARMCOND_EQ = 0x0, /* Equal; Z = 1 */
+ ARMCOND_NE = 0x1, /* Not equal, or unordered; Z = 0 */
+ ARMCOND_CS = 0x2, /* Carry set; C = 1 */
+ ARMCOND_HS = ARMCOND_CS, /* Unsigned higher or same; */
+ ARMCOND_CC = 0x3, /* Carry clear; C = 0 */
+ ARMCOND_LO = ARMCOND_CC, /* Unsigned lower */
+ ARMCOND_MI = 0x4, /* Negative; N = 1 */
+ ARMCOND_PL = 0x5, /* Positive or zero; N = 0 */
+ ARMCOND_VS = 0x6, /* Overflow; V = 1 */
+ ARMCOND_VC = 0x7, /* No overflow; V = 0 */
+ ARMCOND_HI = 0x8, /* Unsigned higher; C = 1 && Z = 0 */
+ ARMCOND_LS = 0x9, /* Unsigned lower or same; C = 0 || Z = 1 */
+ ARMCOND_GE = 0xA, /* Signed greater than or equal; N = V */
+ ARMCOND_LT = 0xB, /* Signed less than; N != V */
+ ARMCOND_GT = 0xC, /* Signed greater than; Z = 0 && N = V */
+ ARMCOND_LE = 0xD, /* Signed less than or equal; Z = 1 && N != V */
+ ARMCOND_AL = 0xE, /* Always */
+ ARMCOND_NV = 0xF, /* Never */
+
+ ARMCOND_SHIFT = 28
+} ARMCond;
+
+#define ARMCOND_MASK (ARMCOND_NV << ARMCOND_SHIFT)
+
+#define ARM_DEF_COND(cond) (((cond) & 0xF) << ARMCOND_SHIFT)
+
+
+
+typedef enum {
+ ARMSHIFT_LSL = 0,
+ ARMSHIFT_LSR = 1,
+ ARMSHIFT_ASR = 2,
+ ARMSHIFT_ROR = 3,
+
+ ARMSHIFT_ASL = ARMSHIFT_LSL
+ /* rrx = (ror, 1) */
+} ARMShiftType;
+
+
+typedef struct {
+ armword_t PSR_c : 8;
+ armword_t PSR_x : 8;
+ armword_t PSR_s : 8;
+ armword_t PSR_f : 8;
+} ARMPSR;
+
+typedef enum {
+ ARMOP_AND = 0x0,
+ ARMOP_EOR = 0x1,
+ ARMOP_SUB = 0x2,
+ ARMOP_RSB = 0x3,
+ ARMOP_ADD = 0x4,
+ ARMOP_ADC = 0x5,
+ ARMOP_SBC = 0x6,
+ ARMOP_RSC = 0x7,
+ ARMOP_TST = 0x8,
+ ARMOP_TEQ = 0x9,
+ ARMOP_CMP = 0xa,
+ ARMOP_CMN = 0xb,
+ ARMOP_ORR = 0xc,
+ ARMOP_MOV = 0xd,
+ ARMOP_BIC = 0xe,
+ ARMOP_MVN = 0xf,
+
+
+ /* not really opcodes */
+
+ ARMOP_STR = 0x0,
+ ARMOP_LDR = 0x1,
+
+ /* ARM2+ */
+ ARMOP_MUL = 0x0, /* Rd := Rm*Rs */
+ ARMOP_MLA = 0x1, /* Rd := (Rm*Rs)+Rn */
+
+ /* ARM3M+ */
+ ARMOP_UMULL = 0x4,
+ ARMOP_UMLAL = 0x5,
+ ARMOP_SMULL = 0x6,
+ ARMOP_SMLAL = 0x7,
+
+ /* for data transfers with register offset */
+ ARM_UP = 1,
+ ARM_DOWN = 0
+} ARMOpcode;
+
+typedef enum {
+ THUMBOP_AND = 0,
+ THUMBOP_EOR = 1,
+ THUMBOP_LSL = 2,
+ THUMBOP_LSR = 3,
+ THUMBOP_ASR = 4,
+ THUMBOP_ADC = 5,
+ THUMBOP_SBC = 6,
+ THUMBOP_ROR = 7,
+ THUMBOP_TST = 8,
+ THUMBOP_NEG = 9,
+ THUMBOP_CMP = 10,
+ THUMBOP_CMN = 11,
+ THUMBOP_ORR = 12,
+ THUMBOP_MUL = 13,
+ THUMBOP_BIC = 14,
+ THUMBOP_MVN = 15,
+ THUMBOP_MOV = 16,
+ THUMBOP_CMPI = 17,
+ THUMBOP_ADD = 18,
+ THUMBOP_SUB = 19,
+ THUMBOP_CMPH = 19,
+ THUMBOP_MOVH = 20
+} ThumbOpcode;
+
+
+/* Generic form - all ARM instructions are conditional. */
+typedef struct {
+ arminstr_t icode : 28;
+ arminstr_t cond : 4;
+} ARMInstrGeneric;
+
+
+
+/* Branch or Branch with Link instructions. */
+typedef struct {
+ arminstr_t offset : 24;
+ arminstr_t link : 1;
+ arminstr_t tag : 3; /* 1 0 1 */
+ arminstr_t cond : 4;
+} ARMInstrBR;
+
+#define ARM_BR_ID 5
+#define ARM_BR_MASK 7 << 25
+#define ARM_BR_TAG ARM_BR_ID << 25
+
+#define ARM_DEF_BR(offs, l, cond) ((offs) | ((l) << 24) | (ARM_BR_TAG) | (cond << ARMCOND_SHIFT))
+
+/* branch */
+#define ARM_B_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 0, cond))
+#define ARM_B(p, offs) ARM_B_COND((p), ARMCOND_AL, (offs))
+/* branch with link */
+#define ARM_BL_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 1, cond))
+#define ARM_BL(p, offs) ARM_BL_COND((p), ARMCOND_AL, (offs))
+
+#define ARM_DEF_BX(reg,sub,cond) (0x12fff << 8 | (reg) | ((sub) << 4) | ((cond) << ARMCOND_SHIFT))
+
+#define ARM_BX_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 1, cond))
+#define ARM_BX(p, reg) ARM_BX_COND((p), ARMCOND_AL, (reg))
+
+#define ARM_BLX_REG_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 3, cond))
+#define ARM_BLX_REG(p, reg) ARM_BLX_REG_COND((p), ARMCOND_AL, (reg))
+
+/* Data Processing Instructions - there are 3 types. */
+
+typedef struct {
+ arminstr_t imm : 8;
+ arminstr_t rot : 4;
+} ARMDPI_op2_imm;
+
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag : 1; /* 0 - immediate shift, 1 - reg shift */
+ arminstr_t type : 2; /* shift type - logical, arithmetic, rotate */
+} ARMDPI_op2_reg_shift;
+
+
+/* op2 is reg shift by imm */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t shift : 5;
+ } imm;
+} ARMDPI_op2_reg_imm;
+
+/* op2 is reg shift by reg */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t pad : 1; /* always 0, to differentiate from HXFER etc. */
+ arminstr_t rs : 4;
+ } reg;
+} ARMDPI_op2_reg_reg;
+
+/* Data processing instrs */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+
+ ARMDPI_op2_reg_shift op2_reg;
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ ARMDPI_op2_reg_reg op2_reg_reg;
+
+ struct {
+ arminstr_t op2 : 12; /* raw operand 2 */
+ arminstr_t rd : 4; /* destination reg */
+ arminstr_t rn : 4; /* first operand reg */
+ arminstr_t s : 1; /* S-bit controls PSR update */
+ arminstr_t opcode : 4; /* arithmetic/logic operation */
+ arminstr_t type : 1; /* type of op2, 0 = register, 1 = immediate */
+ arminstr_t tag : 2; /* 0 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrDPI;
+
+#define ARM_DPI_ID 0
+#define ARM_DPI_MASK 3 << 26
+#define ARM_DPI_TAG ARM_DPI_ID << 26
+
+#define ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, cond) \
+ ((imm8) & 0xFF) | \
+ (((rot) & 0xF) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (1 << 25) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_DEF_DPI_IMM(imm8, rot, rd, rn, s, op) \
+ ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, ARMCOND_AL)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
+
+
+
+#define ARM_DEF_DPI_REG_IMMSHIFT_COND(rm, shift_type, imm_shift, rd, rn, s, op, cond) \
+ (rm) | \
+ ((shift_type & 3) << 5) | \
+ (((imm_shift) & 0x1F) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_IASM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
/* NOTE(review): this previously expanded through ARM_IASM_EMIT, a macro
 * defined nowhere in this header; every sibling uses ARM_IASM. */
#define ARM_IASM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
	ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+
+/* Rd := Rn op (Rm shift_type Rs) */
+#define ARM_DEF_DPI_REG_REGSHIFT_COND(rm, shift_type, rs, rd, rn, s, op, cond) \
+ (rm) | \
+ (1 << 4) | \
+ ((shift_type & 3) << 5) | \
+ ((rs) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond))
+
+
+
+/* Multiple register transfer. */
+typedef struct {
+ arminstr_t reg_list : 16; /* bitfield */
+ arminstr_t rn : 4; /* base reg */
+ arminstr_t ls : 1; /* load(1)/store(0) */
+ arminstr_t wb : 1; /* write-back "!" */
+ arminstr_t s : 1; /* restore PSR, force user bit */
+ arminstr_t u : 1; /* up/down */
+ arminstr_t p : 1; /* pre(1)/post(0) index */
+ arminstr_t tag : 3; /* 1 0 0 */
+ arminstr_t cond : 4;
+} ARMInstrMRT;
+
+#define ARM_MRT_ID 4
+#define ARM_MRT_MASK 7 << 25
+#define ARM_MRT_TAG ARM_MRT_ID << 25
+
+#define ARM_DEF_MRT(regs, rn, l, w, s, u, p, cond) \
+ (regs) | \
+ (rn << 16) | \
+ (l << 20) | \
+ (w << 21) | \
+ (s << 22) | \
+ (u << 23) | \
+ (p << 24) | \
+ (ARM_MRT_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_LDM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_STM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 0, 0, 0, 1, 0, ARMCOND_AL))
+
+/* stmdb sp!, {regs} */
+#define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+#define ARM_IASM_PUSH(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+
/* ldmia sp!, {regs} */
#define ARM_POP(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
/* was ARM_IASM_EMIT, which is defined nowhere; ARM_IASM is the
 * inline-emission macro used by its siblings (cf. ARM_IASM_PUSH) */
#define ARM_IASM_POP(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+
/* ldmia sp, {regs} ; (no write-back) */
#define ARM_POP_NWB(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
/* was ARM_IASM_EMIT (undefined); use ARM_IASM like the other inline macros */
#define ARM_IASM_POP_NWB(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+
+#define ARM_PUSH1(p, r1) ARM_PUSH(p, (1 << r1))
+#define ARM_PUSH2(p, r1, r2) ARM_PUSH(p, (1 << r1) | (1 << r2))
+#define ARM_PUSH3(p, r1, r2, r3) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_PUSH4(p, r1, r2, r3, r4) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_PUSH5(p, r1, r2, r3, r4, r5) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_PUSH6(p, r1, r2, r3, r4, r5, r6) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_PUSH7(p, r1, r2, r3, r4, r5, r6, r7) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_PUSH8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+
+#define ARM_POP8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+#define ARM_POP7(p, r1, r2, r3, r4, r5, r6, r7) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_POP6(p, r1, r2, r3, r4, r5, r6) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_POP5(p, r1, r2, r3, r4, r5) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_POP4(p, r1, r2, r3, r4) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_POP3(p, r1, r2, r3) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_POP2(p, r1, r2) ARM_POP(p, (1 << r1) | (1 << r2))
+#define ARM_POP1(p, r1) ARM_POP(p, (1 << r1))
+
+
+/* Multiply instructions */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 4; /* 9 */
+ arminstr_t rs : 4;
+ arminstr_t rn : 4;
+ arminstr_t rd : 4;
+ arminstr_t s : 1;
+ arminstr_t opcode : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrMul;
+
+#define ARM_MUL_ID 0
+#define ARM_MUL_ID2 9
+#define ARM_MUL_MASK ((0xF << 24) | (0xF << 4))
+#define ARM_MUL_TAG ((ARM_MUL_ID << 24) | (ARM_MUL_ID2 << 4))
+
/*
 * Build a MUL-family instruction word.
 * Bit layout (matching the ARMInstrMul struct above and the ARM ARM):
 * Rm[3:0], 1001[7:4], Rs[11:8], Rn[15:12], Rd[19:16], S[20],
 * opcode[23:21], tag[27:24], cond[31:28].
 * The S bit and opcode were previously shifted to bits 17/18, which
 * overlaps the Rd field and mis-encodes MLA/MULS/UMULL etc. (plain
 * ARM_MUL masked the bug since op == 0 and s == 0 there).
 */
#define ARM_DEF_MUL_COND(op, rd, rm, rs, rn, s, cond) \
	(rm)                  | \
	((rs) << 8)           | \
	((rn) << 12)          | \
	((rd) << 16)          | \
	(((s) & 1) << 20)     | \
	(((op) & 7) << 21)    | \
	ARM_MUL_TAG           | \
	ARM_DEF_COND(cond)
+
+/* Rd := (Rm * Rs)[31:0]; 32 x 32 -> 32 */
+#define ARM_MUL_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_MUL(p, rd, rm, rs) \
+ ARM_MUL_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MULS_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_MULS(p, rd, rm, rs) \
+ ARM_MULS_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MUL_REG_REG(p, rd, rm, rs) ARM_MUL(p, rd, rm, rs)
+#define ARM_MULS_REG_REG(p, rd, rm, rs) ARM_MULS(p, rd, rm, rs)
+
/* inline */
/* these previously expanded through ARM_IASM_EMIT, which is defined
 * nowhere in this header; ARM_IASM is the inline-emission macro */
#define ARM_IASM_MUL_COND(rd, rm, rs, cond) \
	ARM_IASM(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
#define ARM_IASM_MUL(rd, rm, rs) \
	ARM_IASM_MUL_COND(rd, rm, rs, ARMCOND_AL)
#define ARM_IASM_MULS_COND(rd, rm, rs, cond) \
	ARM_IASM(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
#define ARM_IASM_MULS(rd, rm, rs) \
	ARM_IASM_MULS_COND(rd, rm, rs, ARMCOND_AL)
+
+
+/* Rd := (Rm * Rs) + Rn; 32x32+32->32 */
+#define ARM_MLA_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_MLA(p, rd, rm, rs, rn) \
+ ARM_MLA_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_MLAS_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_MLAS(p, rd, rm, rs, rn) \
+ ARM_MLAS_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+
/* inline */
/* these previously expanded through ARM_IASM_EMIT, which is defined
 * nowhere in this header; ARM_IASM is the inline-emission macro */
#define ARM_IASM_MLA_COND(rd, rm, rs, rn, cond) \
	ARM_IASM(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
#define ARM_IASM_MLA(rd, rm, rs, rn) \
	ARM_IASM_MLA_COND(rd, rm, rs, rn, ARMCOND_AL)
#define ARM_IASM_MLAS_COND(rd, rm, rs, rn, cond) \
	ARM_IASM(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
#define ARM_IASM_MLAS(rd, rm, rs, rn) \
	ARM_IASM_MLAS_COND(rd, rm, rs, rn, ARMCOND_AL)
+
+
+
+/* Word/byte transfer */
+typedef union {
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ struct {
+ arminstr_t op2_imm : 12;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t b : 1;
+ arminstr_t u : 1; /* down(0) / up(1) */
+ arminstr_t p : 1; /* post-index(0) / pre-index(1) */
+ arminstr_t type : 1; /* imm(0) / register(1) */
+ arminstr_t tag : 2; /* 0 1 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrWXfer;
+
+#define ARM_WXFER_ID 1
+#define ARM_WXFER_MASK 3 << 26
+#define ARM_WXFER_TAG ARM_WXFER_ID << 26
+
+
+#define ARM_DEF_WXFER_IMM(imm12, rd, rn, ls, wb, b, p, cond) \
+ ((((int)imm12) < 0) ? -(int)(imm12) : (imm12)) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ (((int)(imm12) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_WXFER_MAX_OFFS 0xFFF
+
+/* this macro checks for imm12 bounds */
+#define ARM_EMIT_WXFER_IMM(ptr, imm12, rd, rn, ls, wb, b, p, cond) \
+ do { \
+ int _imm12 = (int)(imm12) < -ARM_WXFER_MAX_OFFS \
+ ? -ARM_WXFER_MAX_OFFS \
+ : (int)(imm12) > ARM_WXFER_MAX_OFFS \
+ ? ARM_WXFER_MAX_OFFS \
+ : (int)(imm12); \
+ ARM_EMIT((ptr), \
+ ARM_DEF_WXFER_IMM(_imm12, (rd), (rn), (ls), (wb), (b), (p), (cond))); \
+ } while (0)
+
+
+/* LDRx */
+/* immediate offset, post-index */
+#define ARM_LDR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 0, cond))
+
+#define ARM_LDR_IMM_POST(p, rd, rn, imm) ARM_LDR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 0, cond))
+
+#define ARM_LDRB_IMM_POST(p, rd, rn, imm) ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_LDR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+
+#define ARM_LDR_IMM(p, rd, rn, imm) ARM_LDR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+
+#define ARM_LDRB_IMM(p, rd, rn, imm) ARM_LDRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* STRx */
+/* immediate offset, post-index */
+#define ARM_STR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 0, cond))
+
+#define ARM_STR_IMM_POST(p, rd, rn, imm) ARM_STR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 0, cond))
+
+#define ARM_STRB_IMM_POST(p, rd, rn, imm) ARM_STRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_STR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)
+/* ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)) */
+
+#define ARM_STR_IMM(p, rd, rn, imm) ARM_STR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+
+#define ARM_STRB_IMM(p, rd, rn, imm) ARM_STRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* write-back */
+#define ARM_STR_IMM_WB_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 1, 0, 1, cond)
+#define ARM_STR_IMM_WB(p, rd, rn, imm) ARM_STR_IMM_WB_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, u, p, cond) \
+ (rm) | \
+ ((shift_type) << 5) | \
+ ((shift) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ (1 << 25) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_UP, p, cond)
+#define ARM_DEF_WXFER_REG_MINUS_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_DOWN, p, cond)
+
+
+#define ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+#define ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDR_REG_REG(p, rd, rn, rm) \
+ ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+#define ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDRB_REG_REG(p, rd, rn, rm) \
+ ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 0, 1, cond))
+#define ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STR_REG_REG(p, rd, rn, rm) \
+ ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+/* zero-extend */
+#define ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+#define ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STRB_REG_REG(p, rd, rn, rm) \
+ ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+
+/* ARMv4+ */
+/* Half-word or byte (signed) transfer. */
+typedef struct {
+ arminstr_t rm : 4; /* imm_lo */
+ arminstr_t tag3 : 1; /* 1 */
+ arminstr_t h : 1; /* half-word or byte */
+ arminstr_t s : 1; /* sign-extend or zero-extend */
+ arminstr_t tag2 : 1; /* 1 */
+ arminstr_t imm_hi : 4;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t type : 1; /* imm(1) / reg(0) */
+ arminstr_t u : 1; /* +- */
+ arminstr_t p : 1; /* pre/post-index */
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrHXfer;
+
+#define ARM_HXFER_ID 0
+#define ARM_HXFER_ID2 1
+#define ARM_HXFER_ID3 1
+#define ARM_HXFER_MASK ((0x7 << 25) | (0x9 << 4))
+#define ARM_HXFER_TAG ((ARM_HXFER_ID << 25) | (ARM_HXFER_ID2 << 7) | (ARM_HXFER_ID3 << 4))
+
/*
 * Encode a halfword / signed-byte transfer with an immediate offset.
 * The 8-bit offset is split across the instruction: low nibble in bits
 * 3:0, high nibble in bits 11:8.  A negative imm is negated and the U
 * (up/down) bit 23 is cleared instead, so callers can pass signed
 * offsets directly.  h = halfword, s = sign-extend, ls = load(1)/
 * store(0), wb = write-back, p = pre(1)/post(0)-index; bit 22 set
 * selects the immediate form.
 */
#define ARM_DEF_HXFER_IMM_COND(imm, h, s, rd, rn, ls, wb, p, cond) \
	((imm) < 0?(-(imm)) & 0xF:(imm) & 0xF) | \
	((h) << 5) | \
	((s) << 6) | \
	((imm) < 0?((-(imm)) << 4) & 0xF00:((imm) << 4) & 0xF00) | \
	((rd) << 12) | \
	((rn) << 16) | \
	((ls) << 20) | \
	((wb) << 21) | \
	(1 << 22) | \
	(((int)(imm) >= 0) << 23) | \
	((p) << 24) | \
	ARM_HXFER_TAG | \
	ARM_DEF_COND(cond)
+
+#define ARM_LDRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_IMM(p, rd, rn, imm) \
+ ARM_LDRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_IMM(p, rd, rn, imm) \
+ ARM_LDRSH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_IMM(p, rd, rn, imm) \
+ ARM_LDRSB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_STRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_IMM(p, rd, rn, imm) \
+ ARM_STRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, u, p, cond) \
+ ((rm) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (0 << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_HXFER_REG_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_UP, p, cond)
+#define ARM_DEF_HXFER_REG_MINUS_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_DOWN, p, cond)
+
+#define ARM_LDRH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_REG_REG(p, rd, rm, rn) \
+ ARM_LDRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+#define ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_REG_REG(p, rd, rm, rn) \
+ ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+#define ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_REG_REG(p, rd, rm, rn) ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+
+#define ARM_STRH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_REG_REG(p, rd, rm, rn) \
+ ARM_STRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+
+
+
+/* Swap */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag3 : 8; /* 0x9 */
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t tag2 : 2;
+ arminstr_t b : 1;
+ arminstr_t tag : 5; /* 0x2 */
+ arminstr_t cond : 4;
+} ARMInstrSwap;
+
+#define ARM_SWP_ID 2
+#define ARM_SWP_ID2 9
+#define ARM_SWP_MASK ((0x1F << 23) | (3 << 20) | (0xFF << 4))
+#define ARM_SWP_TAG ((ARM_SWP_ID << 23) | (ARM_SWP_ID2 << 4))
+
+
+
+/* Software interrupt */
+typedef struct {
+ arminstr_t num : 24;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrSWI;
+
+#define ARM_SWI_ID 0xF
+#define ARM_SWI_MASK (0xF << 24)
+#define ARM_SWI_TAG (ARM_SWI_ID << 24)
+
+
+
+/* Co-processor Data Processing */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1; /* 0 */
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4; /* CP number */
+ arminstr_t crd : 4;
+ arminstr_t crn : 4;
+ arminstr_t op : 4;
+ arminstr_t tag : 4; /* 0xE */
+ arminstr_t cond : 4;
+} ARMInstrCDP;
+
+#define ARM_CDP_ID 0xE
+#define ARM_CDP_ID2 0
+#define ARM_CDP_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CDP_TAG ((ARM_CDP_ID << 24) | (ARM_CDP_ID2 << 4))
+
+
+/* Co-processor Data Transfer (ldc/stc) */
+typedef struct {
+ arminstr_t offs : 8;
+ arminstr_t cpn : 4;
+ arminstr_t crd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t n : 1;
+ arminstr_t u : 1;
+ arminstr_t p : 1;
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrCDT;
+
+#define ARM_CDT_ID 6
+#define ARM_CDT_MASK (7 << 25)
+#define ARM_CDT_TAG (ARM_CDT_ID << 25)
+
+
+/* Co-processor Register Transfer (mcr/mrc) */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1;
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4;
+ arminstr_t rd : 4;
+ arminstr_t crn : 4;
+ arminstr_t ls : 1;
+ arminstr_t op1 : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrCRT;
+
+#define ARM_CRT_ID 0xE
+#define ARM_CRT_ID2 0x1
+#define ARM_CRT_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CRT_TAG ((ARM_CRT_ID << 24) | (ARM_CRT_ID2 << 4))
+
+/* Move register to PSR. */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+ struct {
+ arminstr_t rm : 4;
+ arminstr_t pad : 8; /* 0 */
+ arminstr_t tag4 : 4; /* 0xF */
+ arminstr_t fld : 4;
+ arminstr_t tag3 : 2; /* 0x2 */
+ arminstr_t sel : 1;
+ arminstr_t tag2 : 2; /* 0x2 */
+ arminstr_t type : 1;
+ arminstr_t tag : 2; /* 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrMSR;
+
+#define ARM_MSR_ID 0
+#define ARM_MSR_ID2 2
+#define ARM_MSR_ID3 2
+#define ARM_MSR_ID4 0xF
+#define ARM_MSR_MASK ((3 << 26) | \
+ (3 << 23) | \
+ (3 << 20) | \
+ (0xF << 12))
+#define ARM_MSR_TAG ((ARM_MSR_ID << 26) | \
+ (ARM_MSR_ID2 << 23) | \
+ (ARM_MSR_ID3 << 20) | \
+ (ARM_MSR_ID4 << 12))
+
+
+/* Move PSR to register. */
+typedef struct {
+ arminstr_t tag3 : 12;
+ arminstr_t rd : 4;
+ arminstr_t tag2 : 6;
+ arminstr_t sel : 1; /* CPSR | SPSR */
+ arminstr_t tag : 5;
+ arminstr_t cond : 4;
+} ARMInstrMRS;
+
+#define ARM_MRS_ID 2
+#define ARM_MRS_ID2 0xF
+#define ARM_MRS_ID3 0
+#define ARM_MRS_MASK ((0x1F << 23) | (0x3F << 16) | 0xFFF)
+#define ARM_MRS_TAG ((ARM_MRS_ID << 23) | (ARM_MRS_ID2 << 16) | ARM_MRS_ID3)
+
+
+
+#include "mono/arch/arm/arm_dpimacros.h"
+
+#define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0)
+
+
+#define ARM_SHL_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHL_IMM(p, rd, rm, imm) \
+ ARM_SHL_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHLS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHLS_IMM(p, rd, rm, imm) \
+ ARM_SHLS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHR_IMM(p, rd, rm, imm) \
+ ARM_SHR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHRS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHRS_IMM(p, rd, rm, imm) \
+ ARM_SHRS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SAR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SAR_IMM(p, rd, rm, imm) \
+ ARM_SAR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SARS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SARS_IMM(p, rd, rm, imm) \
+ ARM_SARS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_ROR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_ROR_IMM(p, rd, rm, imm) \
+ ARM_ROR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_RORS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_RORS_IMM(p, rd, rm, imm) \
+ ARM_RORS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHL_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHL_REG(p, rd, rm, rs) \
+ ARM_SHL_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHLS_REG(p, rd, rm, rs) \
+ ARM_SHLS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_REG(p, rd, rm, rs) ARM_SHLS_REG(p, rd, rm, rs)
+
+#define ARM_SHR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHR_REG(p, rd, rm, rs) \
+ ARM_SHR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHRS_REG(p, rd, rm, rs) \
+ ARM_SHRS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_REG(p, rd, rm, rs) ARM_SHRS_REG(p, rd, rm, rs)
+
+#define ARM_SAR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SAR_REG(p, rd, rm, rs) \
+ ARM_SAR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SARS_REG(p, rd, rm, rs) \
+ ARM_SARS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_REG(p, rd, rm, rs) ARM_SARS_REG(p, rd, rm, rs)
+
+#define ARM_ROR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_ROR_REG(p, rd, rm, rs) \
+ ARM_ROR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_RORS_REG(p, rd, rm, rs) \
+ ARM_RORS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_REG(p, rd, rm, rs) ARM_RORS_REG(p, rd, rm, rs)
+
+#ifdef __native_client_codegen__
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE7FEDEF0)
+#else
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE6000010)
+#endif
+#define ARM_IASM_DBRK() ARM_IASM_EMIT(0xE6000010)
+
+#define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1)
+#define ARM_DEC(p, reg) ARM_SUB_REG_IMM8(p, reg, reg, 1)
+
+#define ARM_MLS(p, rd, rn, rm, ra) ARM_EMIT((p), (ARMCOND_AL << 28) | (0x6 << 20) | ((rd) << 16) | ((ra) << 12) | ((rm) << 8) | (0x9 << 4) | ((rn) << 0))
+
+/* ARM V5 */
+
+/* Count leading zeros, CLZ{cond} Rd, Rm */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 8;
+ arminstr_t rd : 4;
+ arminstr_t tag : 12;
+ arminstr_t cond : 4;
+} ARMInstrCLZ;
+
#define ARM_CLZ_ID 0x16F
#define ARM_CLZ_ID2 0xF1
/* Bug fix: the second term used '<' (comparison, which evaluates to 0)
 * instead of '<<', so bits 11:4 were missing from the decode mask. */
#define ARM_CLZ_MASK ((0xFFF << 16) | (0xFF << 4))
#define ARM_CLZ_TAG ((ARM_CLZ_ID << 16) | (ARM_CLZ_ID2 << 4))
+
+
+
+
+typedef union {
+ ARMInstrBR br;
+ ARMInstrDPI dpi;
+ ARMInstrMRT mrt;
+ ARMInstrMul mul;
+ ARMInstrWXfer wxfer;
+ ARMInstrHXfer hxfer;
+ ARMInstrSwap swp;
+ ARMInstrCDP cdp;
+ ARMInstrCDT cdt;
+ ARMInstrCRT crt;
+ ARMInstrSWI swi;
+ ARMInstrMSR msr;
+ ARMInstrMRS mrs;
+ ARMInstrCLZ clz;
+
+ ARMInstrGeneric generic;
+ arminstr_t raw;
+} ARMInstr;
+
+/* ARMv6t2 */
+
+#define ARM_MOVW_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (0 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVW_REG_IMM(p, rd, imm16) ARM_MOVW_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
+
+#define ARM_MOVT_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (4 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVT_REG_IMM(p, rd, imm16) ARM_MOVT_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
+
+/* MCR */
+#define ARM_DEF_MCR_COND(coproc, opc1, rt, crn, crm, opc2, cond) \
+ ARM_DEF_COND ((cond)) | ((0xe << 24) | (((opc1) & 0x7) << 21) | (0 << 20) | (((crn) & 0xf) << 16) | (((rt) & 0xf) << 12) | (((coproc) & 0xf) << 8) | (((opc2) & 0x7) << 5) | (1 << 4) | (((crm) & 0xf) << 0))
+
+#define ARM_MCR_COND(p, coproc, opc1, rt, crn, crm, opc2, cond) \
+ ARM_EMIT(p, ARM_DEF_MCR_COND ((coproc), (opc1), (rt), (crn), (crm), (opc2), (cond)))
+
+#define ARM_MCR(p, coproc, opc1, rt, crn, crm, opc2) \
+ ARM_MCR_COND ((p), (coproc), (opc1), (rt), (crn), (crm), (opc2), ARMCOND_AL)
+
+/* ARMv7VE */
+#define ARM_SDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x1 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0)))
+#define ARM_SDIV(p, rd, rn, rm) ARM_SDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL)
+
+#define ARM_UDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x3 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0)))
+#define ARM_UDIV(p, rd, rn, rm) ARM_UDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL)
+
+/* ARMv7 */
+
+typedef enum {
+ ARM_DMB_SY = 0xf,
+} ArmDmbFlags;
+
+#define ARM_DMB(p, option) ARM_EMIT ((p), ((0xf << 28) | (0x57 << 20) | (0xf << 16) | (0xf << 12) | (0x0 << 8) | (0x5 << 4) | ((option) << 0)))
+
+#define ARM_LDREX_REG(p, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x1 << 20) | ((rn) << 16) | ((rt) << 12)) | (0xf << 8) | (0x9 << 4) | 0xf << 0)
+
+#define ARM_STREX_REG(p, rd, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x0 << 20) | ((rn) << 16) | ((rd) << 12)) | (0xf << 8) | (0x9 << 4) | ((rt) << 0))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ARM_H */
+
diff --git a/src/arch/arm/arm-dis.c b/src/arch/arm/arm-dis.c
new file mode 100644
index 0000000..5074f26
--- /dev/null
+++ b/src/arch/arm/arm-dis.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+
+#include <stdarg.h>
+
+#include "arm-dis.h"
+#include "arm-codegen.h"
+
+
+static ARMDis* gdisasm = NULL;
+
+static int use_reg_alias = 1;
+
/* Lookup tables used by the dump_* routines below, each indexed by the
 * corresponding instruction bit-field.  Declared "static const"
 * (storage-class specifier first) instead of the deprecated
 * "const static" ordering. */

/* Condition-code suffixes, indexed by the 4-bit cond field (AL is ""). */
static const char* cond[] = {
	"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
	"hi", "ls", "ge", "lt", "gt", "le", "", "nv"
};

/* Data-processing mnemonics, indexed by the 4-bit opcode field. */
static const char* ops[] = {
	"and", "eor", "sub", "rsb", "add", "adc", "sbc", "rsc",
	"tst", "teq", "cmp", "cmn", "orr", "mov", "bic", "mvn"
};

static const char* shift_types[] = {"lsl", "lsr", "asr", "ror"};

/* Multiply-family mnemonics; slots 2-3 are unallocated encodings. */
static const char* mul_ops[] = {
	"mul", "mla", "?", "?", "umull", "umlal", "smull", "smlal"
};

/* APCS register aliases, indexed by register number 0-15. */
static const char* reg_alias[] = {
	"a1", "a2", "a3", "a4",
	"r4", "r5", "r6", "r7", "r8", "r9", "r10",
	"fp", "ip", "sp", "lr", "pc"
};

/* MSR field-mask names, indexed by the fld bits. */
static const char* msr_fld[] = {"f", "c", "x", "?", "s"};
+
+
+/* private functions prototypes (to keep compiler happy) */
+void chk_out(ARMDis* dis);
+void dump_reg(ARMDis* dis, int reg);
+void dump_creg(ARMDis* dis, int creg);
+void dump_reglist(ARMDis* dis, int reg_list);
+void init_gdisasm(void);
+
+void dump_br(ARMDis* dis, ARMInstr i);
+void dump_cdp(ARMDis* dis, ARMInstr i);
+void dump_cdt(ARMDis* dis, ARMInstr i);
+void dump_crt(ARMDis* dis, ARMInstr i);
+void dump_dpi(ARMDis* dis, ARMInstr i);
+void dump_hxfer(ARMDis* dis, ARMInstr i);
+void dump_mrs(ARMDis* dis, ARMInstr i);
+void dump_mrt(ARMDis* dis, ARMInstr i);
+void dump_msr(ARMDis* dis, ARMInstr i);
+void dump_mul(ARMDis* dis, ARMInstr i);
+void dump_swi(ARMDis* dis, ARMInstr i);
+void dump_swp(ARMDis* dis, ARMInstr i);
+void dump_wxfer(ARMDis* dis, ARMInstr i);
+void dump_clz(ARMDis* dis, ARMInstr i);
+
+
+/*
+void out(ARMDis* dis, const char* format, ...) {
+ va_list arglist;
+ va_start(arglist, format);
+ fprintf(dis->dis_out, format, arglist);
+ va_end(arglist);
+}
+*/
+
+
+void chk_out(ARMDis* dis) {
+ if (dis != NULL && dis->dis_out == NULL) dis->dis_out = stdout;
+}
+
+
+void armdis_set_output(ARMDis* dis, FILE* f) {
+ if (dis != NULL) {
+ dis->dis_out = f;
+ chk_out(dis);
+ }
+}
+
+FILE* armdis_get_output(ARMDis* dis) {
+ return (dis != NULL ? dis->dis_out : NULL);
+}
+
+
+
+
+void dump_reg(ARMDis* dis, int reg) {
+ reg &= 0xF;
+ if (!use_reg_alias || (reg > 3 && reg < 11)) {
+ fprintf(dis->dis_out, "r%d", reg);
+ } else {
+ fprintf(dis->dis_out, "%s", reg_alias[reg]);
+ }
+}
+
+void dump_creg(ARMDis* dis, int creg) {
+ if (dis != NULL) {
+ creg &= 0xF;
+ fprintf(dis->dis_out, "c%d", creg);
+ }
+}
+
/*
 * Print an LDM/STM register list, collapsing consecutive runs:
 * a run of 3+ registers prints as "rA-rB", a run of exactly 2 as
 * "rA, rB", and isolated registers individually.  n counts emitted
 * groups so a ", " separator is placed between them.
 */
void dump_reglist(ARMDis* dis, int reg_list) {
	int i = 0, j, n = 0;
	int m1 = 1, m2, rn;
	while (i < 16) {
		if ((reg_list & m1) != 0) {
			if (n != 0) fprintf(dis->dis_out, ", ");
			n++;
			dump_reg(dis, i);
			/* rn = number of additional consecutive set bits after i */
			for (j = i+1, rn = 0, m2 = m1<<1; j < 16; ++j, m2<<=1) {
				if ((reg_list & m2) != 0) ++rn;
				else break;
			}
			i+=rn;
			if (rn > 1) {
				fprintf(dis->dis_out, "-");
				dump_reg(dis, i);
			} else if (rn == 1) {
				fprintf(dis->dis_out, ", ");
				dump_reg(dis, i);
			}
			/* skip past the run and the (known clear) bit after it */
			m1<<=(rn+1);
			i++;
		} else {
			++i;
			m1<<=1;
		}
	}
}
+
+
/*
 * Disassemble B/BL.  The 24-bit offset is sign-extended and scaled by
 * shifting left 8 then arithmetically right 6 (net << 2), and the
 * printed target adds the 8-byte pipeline adjustment to the address of
 * the instruction itself (dis->pi).
 * NOTE(review): the (int)dis->pi cast truncates pointers on LP64
 * targets -- an intptr_t would be safer; confirm intended platforms.
 */
void dump_br(ARMDis* dis, ARMInstr i) {
	fprintf(dis->dis_out, "b%s%s\t%x\t; %p -> %#x",
		(i.br.link == 1) ? "l" : "",
		cond[i.br.cond], i.br.offset, dis->pi, (int)dis->pi + 4*2 + ((int)(i.br.offset << 8) >> 6));
}
+
+
/*
 * Disassemble a data-processing instruction (AND..MVN).  Handles the
 * three operand-2 forms: rotated 8-bit immediate, register shifted by
 * immediate, and register shifted by register.
 */
void dump_dpi(ARMDis* dis, ARMInstr i) {
	fprintf(dis->dis_out, "%s%s", ops[i.dpi.all.opcode], cond[i.dpi.all.cond]);

	/* comparison ops (TST..CMN) always set flags, so the "s" suffix is
	 * only printed for the other opcodes */
	if ((i.dpi.all.opcode < ARMOP_TST || i.dpi.all.opcode > ARMOP_CMN) && (i.dpi.all.s != 0)) {
		fprintf(dis->dis_out, "s");
	}

	fprintf(dis->dis_out, "\t");

	if ((i.dpi.all.opcode < ARMOP_TST) || (i.dpi.all.opcode > ARMOP_CMN)) {
		/* for comparison operations Rd is ignored */
		dump_reg(dis, i.dpi.all.rd);
		fprintf(dis->dis_out, ", ");
	}

	if ((i.dpi.all.opcode != ARMOP_MOV) && (i.dpi.all.opcode != ARMOP_MVN)) {
		/* for MOV/MVN Rn is ignored */
		dump_reg(dis, i.dpi.all.rn);
		fprintf(dis->dis_out, ", ");
	}

	if (i.dpi.all.type == 1) {
		/* immediate */
		if (i.dpi.op2_imm.rot != 0) {
			fprintf(dis->dis_out, "#%d, %d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.rot << 1,
				ARM_SCALE(i.dpi.op2_imm.imm, (i.dpi.op2_imm.rot << 1)) );
		} else {
			fprintf(dis->dis_out, "#%d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.imm);
		}
	} else {
		/* reg-reg */
		if (i.dpi.op2_reg.tag == 0) {
			/* op2 is reg shift by imm */
			dump_reg(dis, i.dpi.op2_reg_imm.r2.rm);
			if (i.dpi.op2_reg_imm.imm.shift != 0) {
				fprintf(dis->dis_out, " %s #%d", shift_types[i.dpi.op2_reg_imm.r2.type], i.dpi.op2_reg_imm.imm.shift);
			}
		} else {
			/* op2 is reg shift by reg */
			dump_reg(dis, i.dpi.op2_reg_reg.r2.rm);
			fprintf(dis->dis_out, " %s ", shift_types[i.dpi.op2_reg_reg.r2.type]);
			dump_reg(dis, i.dpi.op2_reg_reg.reg.rs);
		}

	}
}
+
/*
 * Disassemble a word/unsigned-byte transfer (LDR/STR/LDRB/STRB and the
 * unprivileged "t" variants).  Handles immediate and shifted-register
 * offsets plus pre/post indexing and write-back ("!").
 */
void dump_wxfer(ARMDis* dis, ARMInstr i) {
	fprintf(dis->dis_out, "%s%s%s%s\t",
		(i.wxfer.all.ls == 0) ? "str" : "ldr",
		cond[i.generic.cond],
		(i.wxfer.all.b == 0) ? "" : "b",
		(i.wxfer.all.ls != 0 && i.wxfer.all.wb != 0) ? "t" : "");
	dump_reg(dis, i.wxfer.all.rd);
	fprintf(dis->dis_out, ", [");
	dump_reg(dis, i.wxfer.all.rn);
	/* post-indexed: the base bracket closes before the offset */
	fprintf(dis->dis_out, "%s, ", (i.wxfer.all.p == 0) ? "]" : "");

	if (i.wxfer.all.type == 0) { /* imm */
		fprintf(dis->dis_out, "#%s%d", (i.wxfer.all.u == 0) ? "-" : "", i.wxfer.all.op2_imm);
	} else {
		dump_reg(dis, i.wxfer.op2_reg_imm.r2.rm);
		if (i.wxfer.op2_reg_imm.imm.shift != 0) {
			fprintf(dis->dis_out, " %s #%d", shift_types[i.wxfer.op2_reg_imm.r2.type], i.wxfer.op2_reg_imm.imm.shift);
		}
	}

	if (i.wxfer.all.p != 0) {
		/* close pre-index instr, also check for write-back */
		fprintf(dis->dis_out, "]%s", (i.wxfer.all.wb != 0) ? "!" : "");
	}
}
+
/*
 * Disassemble a halfword / signed-byte transfer (LDRH/STRH/LDRSH/
 * LDRSB).  type set means immediate offset, split between imm_hi and
 * the rm field (low nibble); type clear means register offset.
 */
void dump_hxfer(ARMDis* dis, ARMInstr i) {
	fprintf(dis->dis_out, "%s%s%s%s\t",
		(i.hxfer.ls == 0) ? "str" : "ldr",
		cond[i.generic.cond],
		(i.hxfer.s != 0) ? "s" : "",
		(i.hxfer.h != 0) ? "h" : "b");
	dump_reg(dis, i.hxfer.rd);
	fprintf(dis->dis_out, ", [");
	dump_reg(dis, i.hxfer.rn);
	/* post-indexed: the base bracket closes before the offset */
	fprintf(dis->dis_out, "%s, ", (i.hxfer.p == 0) ? "]" : "");

	if (i.hxfer.type != 0) { /* imm */
		fprintf(dis->dis_out, "#%s%d", (i.hxfer.u == 0) ? "-" : "", (i.hxfer.imm_hi << 4) | i.hxfer.rm);
	} else {
		dump_reg(dis, i.hxfer.rm);
	}

	if (i.hxfer.p != 0) {
		/* close pre-index instr, also check for write-back */
		fprintf(dis->dis_out, "]%s", (i.hxfer.wb != 0) ? "!" : "");
	}
}
+
+
/*
 * Disassemble a multiple register transfer (LDM/STM) with its
 * increment/decrement (u) and after/before (p) addressing-mode
 * suffixes, write-back marker and register list.
 */
void dump_mrt(ARMDis* dis, ARMInstr i) {
	fprintf(dis->dis_out, "%s%s%s%s\t", (i.mrt.ls == 0) ? "stm" : "ldm", cond[i.mrt.cond],
		(i.mrt.u == 0) ? "d" : "i", (i.mrt.p == 0) ? "a" : "b");
	dump_reg(dis, i.mrt.rn);
	fprintf(dis->dis_out, "%s, {", (i.mrt.wb != 0) ? "!" : "");
	dump_reglist(dis, i.mrt.reg_list);
	fprintf(dis->dis_out, "}");
}
+
+
+void dump_swp(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "swp%s%s ", cond[i.swp.cond], (i.swp.b != 0) ? "b" : "");
+ dump_reg(dis, i.swp.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.swp.rm);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.swp.rn);
+ fprintf(dis->dis_out, "]");
+}
+
+
/*
 * Disassemble the multiply family.  Operand order differs per opcode:
 * MUL rd, rm, rs; MLA rd, rm, rs, rn; the long multiplies print
 * rd (hi), rn (lo), rm, rs.  Unknown opcodes fall back to a DCD dump.
 */
void dump_mul(ARMDis* dis, ARMInstr i) {
	fprintf(dis->dis_out, "%s%s%s\t", mul_ops[i.mul.opcode], cond[i.mul.cond], (i.mul.s != 0) ? "s" : "");
	switch (i.mul.opcode) {
	case ARMOP_MUL:
		dump_reg(dis, i.mul.rd);
		fprintf(dis->dis_out, ", ");
		dump_reg(dis, i.mul.rm);
		fprintf(dis->dis_out, ", ");
		dump_reg(dis, i.mul.rs);
		break;
	case ARMOP_MLA:
		dump_reg(dis, i.mul.rd);
		fprintf(dis->dis_out, ", ");
		dump_reg(dis, i.mul.rm);
		fprintf(dis->dis_out, ", ");
		dump_reg(dis, i.mul.rs);
		fprintf(dis->dis_out, ", ");
		dump_reg(dis, i.mul.rn);
		break;
	case ARMOP_UMULL:
	case ARMOP_UMLAL:
	case ARMOP_SMULL:
	case ARMOP_SMLAL:
		dump_reg(dis, i.mul.rd);
		fprintf(dis->dis_out, ", ");
		dump_reg(dis, i.mul.rn);
		fprintf(dis->dis_out, ", ");
		dump_reg(dis, i.mul.rm);
		fprintf(dis->dis_out, ", ");
		dump_reg(dis, i.mul.rs);
		break;
	default:
		fprintf(dis->dis_out, "DCD 0x%x\t; <unknown>", i.raw);
		break;
	}
}
+
+
/*
 * Disassemble CDP (coprocessor data processing):
 * cdp{cond} pN, op, crd, crn, crm{, op2} -- op2 is omitted when zero.
 */
void dump_cdp(ARMDis* dis, ARMInstr i) {
	fprintf(dis->dis_out, "cdp%s\tp%d, %d, ", cond[i.generic.cond], i.cdp.cpn, i.cdp.op);
	dump_creg(dis, i.cdp.crd);
	fprintf(dis->dis_out, ", ");
	dump_creg(dis, i.cdp.crn);
	fprintf(dis->dis_out, ", ");
	dump_creg(dis, i.cdp.crm);

	if (i.cdp.op2 != 0) {
		fprintf(dis->dis_out, ", %d", i.cdp.op2);
	}
}
+
+
+void dump_cdt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s\tp%d, ", (i.cdt.ls == 0) ? "stc" : "ldc",
+ cond[i.generic.cond], (i.cdt.n != 0) ? "l" : "", i.cdt.cpn);
+ dump_creg(dis, i.cdt.crd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.cdt.rn);
+
+ if (i.cdt.p == 0) {
+ fprintf(dis->dis_out, "]");
+ }
+
+ if (i.cdt.offs != 0) {
+ fprintf(dis->dis_out, ", #%d", i.cdt.offs);
+ }
+
+ if (i.cdt.p != 0) {
+ fprintf(dis->dis_out, "]%s", (i.cdt.wb != 0) ? "!" : "");
+ }
+}
+
+
+void dump_crt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s\tp%d, %d, ", (i.crt.ls == 0) ? "mrc" : "mcr",
+ cond[i.generic.cond], i.crt.cpn, i.crt.op1);
+ dump_reg(dis, i.crt.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.crt.crn);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.crt.crm);
+
+ if (i.crt.op2 != 0) {
+ fprintf(dis->dis_out, ", %d", i.crt.op2);
+ }
+}
+
+
+void dump_msr(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "msr%s\t%spsr_, ", cond[i.generic.cond],
+ (i.msr.all.sel == 0) ? "s" : "c");
+ if (i.msr.all.type == 0) {
+ /* reg */
+ fprintf(dis->dis_out, "%s, ", msr_fld[i.msr.all.fld]);
+ dump_reg(dis, i.msr.all.rm);
+ } else {
+ /* imm */
+ fprintf(dis->dis_out, "f, #%d", i.msr.op2_imm.imm << i.msr.op2_imm.rot);
+ }
+}
+
+
+void dump_mrs(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "mrs%s\t", cond[i.generic.cond]);
+ dump_reg(dis, i.mrs.rd);
+ fprintf(dis->dis_out, ", %spsr", (i.mrs.sel == 0) ? "s" : "c");
+}
+
+
/* Disassemble SWI (software interrupt) with its 24-bit comment field. */
void dump_swi(ARMDis* dis, ARMInstr i) {
	fprintf(dis->dis_out, "swi%s\t%d", cond[i.generic.cond], i.swi.num);
}
+
+
+void dump_clz(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "clz\t");
+ dump_reg(dis, i.clz.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.clz.rm);
+ fprintf(dis->dis_out, "\n");
+}
+
+
+
/*
 * Disassemble size bytes of ARM code starting at p, one line per
 * 32-bit word (address, raw word, then mnemonic).  The mask/tag tests
 * are ordered from most to least specific -- e.g. SWP, MUL and CLZ
 * must be tried before the broader DPI pattern they would otherwise
 * match.  Unrecognised words are dumped as "DCD".
 */
void armdis_decode(ARMDis* dis, void* p, int size) {
	int i;
	arminstr_t* pi = (arminstr_t*)p;
	ARMInstr instr;

	if (dis == NULL) return;

	chk_out(dis);

	/* convert byte count to instruction count */
	size/=sizeof(arminstr_t);

	for (i=0; i<size; ++i) {
		fprintf(dis->dis_out, "%p:\t%08x\t", pi, *pi);
		dis->pi = pi;
		instr.raw = *pi++;

		if ((instr.raw & ARM_BR_MASK) == ARM_BR_TAG) {
			dump_br(dis, instr);
		} else if ((instr.raw & ARM_SWP_MASK) == ARM_SWP_TAG) {
			dump_swp(dis, instr);
		} else if ((instr.raw & ARM_MUL_MASK) == ARM_MUL_TAG) {
			dump_mul(dis, instr);
		} else if ((instr.raw & ARM_CLZ_MASK) == ARM_CLZ_TAG) {
			dump_clz(dis, instr);
		} else if ((instr.raw & ARM_WXFER_MASK) == ARM_WXFER_TAG) {
			dump_wxfer(dis, instr);
		} else if ((instr.raw & ARM_HXFER_MASK) == ARM_HXFER_TAG) {
			dump_hxfer(dis, instr);
		} else if ((instr.raw & ARM_DPI_MASK) == ARM_DPI_TAG) {
			dump_dpi(dis, instr);
		} else if ((instr.raw & ARM_MRT_MASK) == ARM_MRT_TAG) {
			dump_mrt(dis, instr);
		} else if ((instr.raw & ARM_CDP_MASK) == ARM_CDP_TAG) {
			dump_cdp(dis, instr);
		} else if ((instr.raw & ARM_CDT_MASK) == ARM_CDT_TAG) {
			dump_cdt(dis, instr);
		} else if ((instr.raw & ARM_CRT_MASK) == ARM_CRT_TAG) {
			dump_crt(dis, instr);
		} else if ((instr.raw & ARM_MSR_MASK) == ARM_MSR_TAG) {
			dump_msr(dis, instr);
		} else if ((instr.raw & ARM_MRS_MASK) == ARM_MRS_TAG) {
			dump_mrs(dis, instr);
		} else if ((instr.raw & ARM_SWI_MASK) == ARM_SWI_TAG) {
			dump_swi(dis, instr);
		} else {
			fprintf(dis->dis_out, "DCD 0x%x\t; <unknown>", instr.raw);
		}

		fprintf(dis->dis_out, "\n");
	}
}
+
+
/*
 * Open dump_name for writing and make it the disassembler's output.
 * NOTE(review): a failed fopen returns NULL, which armdis_set_output/
 * chk_out silently turn into stdout -- confirm that fallback is the
 * intended behavior rather than an error.
 */
void armdis_open(ARMDis* dis, const char* dump_name) {
	if (dis != NULL && dump_name != NULL) {
		armdis_set_output(dis, fopen(dump_name, "w"));
	}
}
+
+
+void armdis_close(ARMDis* dis) {
+ if (dis->dis_out != NULL && dis->dis_out != stdout && dis->dis_out != stderr) {
+ fclose(dis->dis_out);
+ dis->dis_out = NULL;
+ }
+}
+
+
/* Convenience wrapper: open dump_name, disassemble size bytes at p,
 * then close the stream again. */
void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size) {
	armdis_open(dis, dump_name);
	armdis_decode(dis, p, size);
	armdis_close(dis);
}
+
+
+void armdis_init(ARMDis* dis) {
+ if (dis != NULL) {
+ /* set to stdout */
+ armdis_set_output(dis, NULL);
+ }
+}
+
+
+
+
/*
 * Lazily allocate and initialize the shared global disassembler used
 * by the _armdis_* wrappers.
 * NOTE(review): the malloc result is not checked; on failure
 * armdis_init(NULL) is a no-op but later wrapper calls pass NULL on --
 * confirm all armdis_* entry points tolerate that.
 */
void init_gdisasm() {
	if (gdisasm == NULL) {
		gdisasm = (ARMDis*)malloc(sizeof(ARMDis));
		armdis_init(gdisasm);
	}
}
+
/* Global-instance convenience API: each wrapper lazily creates the
 * shared disassembler and forwards to the corresponding armdis_*
 * function. */

void _armdis_set_output(FILE* f) {
	init_gdisasm();
	armdis_set_output(gdisasm, f);
}

FILE* _armdis_get_output() {
	init_gdisasm();
	return armdis_get_output(gdisasm);
}

void _armdis_decode(void* p, int size) {
	init_gdisasm();
	armdis_decode(gdisasm, p, size);
}

void _armdis_open(const char* dump_name) {
	init_gdisasm();
	armdis_open(gdisasm, dump_name);
}

void _armdis_close() {
	init_gdisasm();
	armdis_close(gdisasm);
}

void _armdis_dump(const char* dump_name, void* p, int size) {
	init_gdisasm();
	armdis_dump(gdisasm, dump_name, p, size);
}
+
diff --git a/src/arch/arm/arm-dis.h b/src/arch/arm/arm-dis.h
new file mode 100644
index 0000000..8019499
--- /dev/null
+++ b/src/arch/arm/arm-dis.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+#ifndef ARM_DIS
+#define ARM_DIS
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _ARMDis {
+ FILE* dis_out;
+ void* pi;
+} ARMDis;
+
+
+void _armdis_set_output(FILE* f);
+FILE* _armdis_get_output(void);
+void _armdis_decode(void* p, int size);
+void _armdis_open(const char* dump_name);
+void _armdis_close(void);
+void _armdis_dump(const char* dump_name, void* p, int size);
+
+
+void armdis_init(ARMDis* dis);
+void armdis_set_output(ARMDis* dis, FILE* f);
+FILE* armdis_get_output(ARMDis* dis);
+void armdis_decode(ARMDis* dis, void* p, int size);
+void armdis_open(ARMDis* dis, const char* dump_name);
+void armdis_close(ARMDis* dis);
+void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ARM_DIS */
diff --git a/src/arch/arm/arm-vfp-codegen.h b/src/arch/arm/arm-vfp-codegen.h
new file mode 100644
index 0000000..8b56b00
--- /dev/null
+++ b/src/arch/arm/arm-vfp-codegen.h
@@ -0,0 +1,247 @@
+//
+// Copyright 2011 Xamarin Inc
+//
+
+#ifndef __MONO_ARM_VFP_CODEGEN_H__
+#define __MONO_ARM_VFP_CODEGEN_H__
+
+#include "arm-codegen.h"
+
+enum {
+ /* VFP registers */
+ ARM_VFP_F0,
+ ARM_VFP_F1,
+ ARM_VFP_F2,
+ ARM_VFP_F3,
+ ARM_VFP_F4,
+ ARM_VFP_F5,
+ ARM_VFP_F6,
+ ARM_VFP_F7,
+ ARM_VFP_F8,
+ ARM_VFP_F9,
+ ARM_VFP_F10,
+ ARM_VFP_F11,
+ ARM_VFP_F12,
+ ARM_VFP_F13,
+ ARM_VFP_F14,
+ ARM_VFP_F15,
+ ARM_VFP_F16,
+ ARM_VFP_F17,
+ ARM_VFP_F18,
+ ARM_VFP_F19,
+ ARM_VFP_F20,
+ ARM_VFP_F21,
+ ARM_VFP_F22,
+ ARM_VFP_F23,
+ ARM_VFP_F24,
+ ARM_VFP_F25,
+ ARM_VFP_F26,
+ ARM_VFP_F27,
+ ARM_VFP_F28,
+ ARM_VFP_F29,
+ ARM_VFP_F30,
+ ARM_VFP_F31,
+
+ ARM_VFP_D0 = ARM_VFP_F0,
+ ARM_VFP_D1 = ARM_VFP_F2,
+ ARM_VFP_D2 = ARM_VFP_F4,
+ ARM_VFP_D3 = ARM_VFP_F6,
+ ARM_VFP_D4 = ARM_VFP_F8,
+ ARM_VFP_D5 = ARM_VFP_F10,
+ ARM_VFP_D6 = ARM_VFP_F12,
+ ARM_VFP_D7 = ARM_VFP_F14,
+ ARM_VFP_D8 = ARM_VFP_F16,
+ ARM_VFP_D9 = ARM_VFP_F18,
+ ARM_VFP_D10 = ARM_VFP_F20,
+ ARM_VFP_D11 = ARM_VFP_F22,
+ ARM_VFP_D12 = ARM_VFP_F24,
+ ARM_VFP_D13 = ARM_VFP_F26,
+ ARM_VFP_D14 = ARM_VFP_F28,
+ ARM_VFP_D15 = ARM_VFP_F30,
+
+ ARM_VFP_COPROC_SINGLE = 10,
+ ARM_VFP_COPROC_DOUBLE = 11,
+
+#define ARM_VFP_OP(p,q,r,s) (((p) << 23) | ((q) << 21) | ((r) << 20) | ((s) << 6))
+#define ARM_VFP_OP2(Fn,N) (ARM_VFP_OP (1,1,1,1) | ((Fn) << 16) | ((N) << 7))
+
+ ARM_VFP_MUL = ARM_VFP_OP (0,1,0,0),
+ ARM_VFP_NMUL = ARM_VFP_OP (0,1,0,1),
+ ARM_VFP_ADD = ARM_VFP_OP (0,1,1,0),
+ ARM_VFP_SUB = ARM_VFP_OP (0,1,1,1),
+ ARM_VFP_DIV = ARM_VFP_OP (1,0,0,0),
+
+ ARM_VFP_CPY = ARM_VFP_OP2 (0,0),
+ ARM_VFP_ABS = ARM_VFP_OP2 (0,1),
+ ARM_VFP_NEG = ARM_VFP_OP2 (1,0),
+ ARM_VFP_SQRT = ARM_VFP_OP2 (1,1),
+ ARM_VFP_CMP = ARM_VFP_OP2 (4,0),
+ ARM_VFP_CMPE = ARM_VFP_OP2 (4,1),
+ ARM_VFP_CMPZ = ARM_VFP_OP2 (5,0),
+ ARM_VFP_CMPEZ = ARM_VFP_OP2 (5,1),
+ ARM_VFP_CVT = ARM_VFP_OP2 (7,1),
+ ARM_VFP_UITO = ARM_VFP_OP2 (8,0),
+ ARM_VFP_SITO = ARM_VFP_OP2 (8,1),
+ ARM_VFP_TOUI = ARM_VFP_OP2 (12,0),
+ ARM_VFP_TOSI = ARM_VFP_OP2 (13,0),
+ ARM_VFP_TOUIZ = ARM_VFP_OP2 (12,1),
+ ARM_VFP_TOSIZ = ARM_VFP_OP2 (13,1),
+
+ ARM_VFP_SID = 0,
+ ARM_VFP_SCR = 1 << 1,
+ ARM_VFP_EXC = 8 << 1
+};
+
+#define ARM_DEF_VFP_DYADIC(cond,cp,op,Fd,Fn,Fm) \
+ (14 << 24) | \
+ ((cp) << 8) | \
+ (op) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ (((Fn) >> 1) << 16) | \
+ (((Fn) & 1) << 7) | \
+ (((Fm) >> 1) << 0) | \
+ (((Fm) & 1) << 5) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_MONADIC(cond,cp,op,Fd,Fm) \
+ (14 << 24) | \
+ ((cp) << 8) | \
+ (op) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ (((Fm) >> 1) << 0) | \
+ (((Fm) & 1) << 5) | \
+ ARM_DEF_COND(cond)
+
/*
 * Encode a VFP load/store (FLDS/FLDD/FSTS/FSTD and the multiple
 * forms).  offset is a signed byte offset: it is scaled down by 4 and
 * its sign is moved into the U bit (bit 23).
 * Fix: removed a duplicated "((wback) << 21)" term (harmless under
 * bitwise OR, but redundant).
 */
#define ARM_DEF_VFP_LSF(cond,cp,post,ls,wback,basereg,Fd,offset) \
	((offset) >= 0? (offset)>>2: -(offset)>>2) | \
	(6 << 25) | \
	((cp) << 8) | \
	(((Fd) >> 1) << 12) | \
	(((Fd) & 1) << 22) | \
	((basereg) << 16) | \
	((ls) << 20) | \
	((wback) << 21) | \
	(((offset) >= 0) << 23) | \
	((post) << 24) | \
	ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_CPT(cond,cp,op,L,Fn,Rd) \
+ (14 << 24) | \
+ (1 << 4) | \
+ ((cp) << 8) | \
+ ((op) << 21) | \
+ ((L) << 20) | \
+ ((Rd) << 12) | \
+ (((Fn) >> 1) << 16) | \
+ (((Fn) & 1) << 7) | \
+ ARM_DEF_COND(cond)
+
+/* FP load and stores */
+#define ARM_FLDS_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_LDR,0,(base),(freg),(offset)))
+#define ARM_FLDS(p,freg,base,offset) \
+ ARM_FLDS_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FLDD_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_LDR,0,(base),(freg),(offset)))
+#define ARM_FLDD(p,freg,base,offset) \
+ ARM_FLDD_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FSTS_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_STR,0,(base),(freg),(offset)))
+#define ARM_FSTS(p,freg,base,offset) \
+ ARM_FSTS_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FSTD_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_STR,0,(base),(freg),(offset)))
+#define ARM_FSTD(p,freg,base,offset) \
+ ARM_FSTD_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FLDMD_COND(p,first_reg,nregs,base,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_LDR,0,(base),(first_reg),((nregs) * 2) << 2))
+
+#define ARM_FLDMD(p,first_reg,nregs,base) \
+ ARM_FLDMD_COND(p,first_reg,nregs,base,ARMCOND_AL)
+
+#define ARM_FSTMD_COND(p,first_reg,nregs,base,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_STR,0,(base),(first_reg),((nregs) * 2) << 2))
+
+#define ARM_FSTMD(p,first_reg,nregs,base) \
+ ARM_FSTMD_COND(p,first_reg,nregs,base,ARMCOND_AL)
+
+#include <mono/arch/arm/arm_vfpmacros.h>
+
+/* coprocessor register transfer */
+#define ARM_FMSR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,0,(freg),(reg)))
+#define ARM_FMRS(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,1,(freg),(reg)))
+
+#define ARM_FMDLR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,0,(freg),(reg)))
+#define ARM_FMRDL(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,1,(freg),(reg)))
+#define ARM_FMDHR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,0,(freg),(reg)))
+#define ARM_FMRDH(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,1,(freg),(reg)))
+
+#define ARM_FMXR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,0,(freg),(reg)))
+#define ARM_FMRX(p,reg,fcreg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,1,(fcreg),(reg)))
+
+#define ARM_FMSTAT(p) \
+ ARM_FMRX((p),ARMREG_R15,ARM_VFP_SCR)
+
+#define ARM_DEF_MCRR(cond,cp,rn,rd,Fm,M) \
+ ((Fm) << 0) | \
+ (1 << 4) | \
+ ((M) << 5) | \
+ ((cp) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((2) << 21) | \
+ (12 << 24) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_FMDRR(p,rd,rn,dm) \
+ ARM_EMIT((p), ARM_DEF_MCRR(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1))
+
+#define ARM_DEF_FMRRD(cond,cp,rn,rd,Dm,D) \
+ ((Dm) << 0) | \
+ (1 << 4) | \
+ ((cp) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((0xc5) << 20) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_FMRRD(p,rd,rn,dm) \
+ ARM_EMIT((p), ARM_DEF_FMRRD(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1))
+
+#define ARM_DEF_FUITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FUITOS(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FUITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FUITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FUITOD(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FUITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FSITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FSITOS(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FSITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FSITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FSITOD(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FSITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#endif /* __MONO_ARM_VFP_CODEGEN_H__ */
+
diff --git a/src/arch/arm/arm-wmmx.h b/src/arch/arm/arm-wmmx.h
new file mode 100644
index 0000000..427c4fc
--- /dev/null
+++ b/src/arch/arm/arm-wmmx.h
@@ -0,0 +1,177 @@
+/*
+ * ARM CodeGen
+ * XScale WirelessMMX extensions
+ * Copyright 2002 Wild West Software
+ */
+
+#ifndef __WMMX_H__
+#define __WMMX_H__ 1
+
+#if 0
+#include <arm-codegen.h>
+#endif
+
+#if defined(ARM_IASM)
+# define WM_ASM(_expr) ARM_IASM(_expr)
+#else
+# define WM_ASM(_expr) __emit (_expr)
+#endif
+
+#if defined(ARM_EMIT)
+# define WM_EMIT(p, i) ARM_EMIT(p, i)
+#else
+# define WM_EMIT(p, i)
+#endif
+
+enum {
+ WM_CC_EQ = 0x0,
+ WM_CC_NE = 0x1,
+ WM_CC_CS = 0x2,
+ WM_CC_HS = WM_CC_CS,
+ WM_CC_CC = 0x3,
+ WM_CC_LO = WM_CC_CC,
+ WM_CC_MI = 0x4,
+ WM_CC_PL = 0x5,
+ WM_CC_VS = 0x6,
+ WM_CC_VC = 0x7,
+ WM_CC_HI = 0x8,
+ WM_CC_LS = 0x9,
+ WM_CC_GE = 0xA,
+ WM_CC_LT = 0xB,
+ WM_CC_GT = 0xC,
+ WM_CC_LE = 0xD,
+ WM_CC_AL = 0xE,
+ WM_CC_NV = 0xF,
+ WM_CC_SHIFT = 28
+};
+
+#if defined(ARM_DEF_COND)
+# define WM_DEF_CC(_cc) ARM_DEF_COND(_cc)
+#else
+# define WM_DEF_CC(_cc) ((_cc & 0xF) << WM_CC_SHIFT)
+#endif
+
+
+enum {
+ WM_R0 = 0x0,
+ WM_R1 = 0x1,
+ WM_R2 = 0x2,
+ WM_R3 = 0x3,
+ WM_R4 = 0x4,
+ WM_R5 = 0x5,
+ WM_R6 = 0x6,
+ WM_R7 = 0x7,
+ WM_R8 = 0x8,
+ WM_R9 = 0x9,
+ WM_R10 = 0xA,
+ WM_R11 = 0xB,
+ WM_R12 = 0xC,
+ WM_R13 = 0xD,
+ WM_R14 = 0xE,
+ WM_R15 = 0xF,
+
+ WM_wR0 = 0x0,
+ WM_wR1 = 0x1,
+ WM_wR2 = 0x2,
+ WM_wR3 = 0x3,
+ WM_wR4 = 0x4,
+ WM_wR5 = 0x5,
+ WM_wR6 = 0x6,
+ WM_wR7 = 0x7,
+ WM_wR8 = 0x8,
+ WM_wR9 = 0x9,
+ WM_wR10 = 0xA,
+ WM_wR11 = 0xB,
+ WM_wR12 = 0xC,
+ WM_wR13 = 0xD,
+ WM_wR14 = 0xE,
+ WM_wR15 = 0xF
+};
+
+
+/*
+ * Qualifiers:
+ * H - 16-bit (HalfWord) SIMD
+ * W - 32-bit (Word) SIMD
+ * D - 64-bit (Double)
+ */
+enum {
+ WM_B = 0,
+ WM_H = 1,
+ WM_D = 2
+};
+
+/*
+ * B.2.3 Transfers From Coprocessor Register (MRC)
+ * Table B-5
+ */
+enum {
+ WM_TMRC_OP2 = 0,
+ WM_TMRC_CPNUM = 1,
+
+ WM_TMOVMSK_OP2 = 1,
+ WM_TMOVMSK_CPNUM = 0,
+
+ WM_TANDC_OP2 = 1,
+ WM_TANDC_CPNUM = 1,
+
+ WM_TORC_OP2 = 2,
+ WM_TORC_CPNUM = 1,
+
+ WM_TEXTRC_OP2 = 3,
+ WM_TEXTRC_CPNUM = 1,
+
+ WM_TEXTRM_OP2 = 3,
+ WM_TEXTRM_CPNUM = 0
+};
+
+
+/*
+ * TANDC<B,H,W>{Cond} R15
+ * Performs AND across the fields of the SIMD PSR register (wCASF) and sends the result
+ * to CPSR; can be performed after a Byte, Half-word or Word operation that sets the flags.
+ * NOTE: R15 is omitted from the macro declaration;
+ */
+#define DEF_WM_TNADC_CC(_q, _cc) WM_DEF_CC((_cc)) + ((_q) << 0x16) + 0xE13F130
+
+#define _WM_TNADC_CC(_q, _cc) WM_ASM(DEF_WM_TNADC_CC(_q, _cc))
+#define ARM_WM_TNADC_CC(_p, _q, _cc) WM_EMIT(_p, DEF_WM_TNADC_CC(_q, _cc))
+
+/* inline assembly */
+#define _WM_TNADC(_q) _WM_TNADC_CC((_q), WM_CC_AL)
+#define _WM_TNADCB() _WM_TNADC(WM_B)
+#define _WM_TNADCH() _WM_TNADC(WM_H)
+#define _WM_TNADCD() _WM_TNADC(WM_D)
+
+/* codegen */
+#define ARM_WM_TNADC(_p, _q) ARM_WM_TNADC_CC((_p), (_q), WM_CC_AL)
+#define ARM_WM_TNADCB(_p) ARM_WM_TNADC(_p, WM_B)
+#define ARM_WM_TNADCH(_p) ARM_WM_TNADC(_p, WM_H)
+#define ARM_WM_TNADCD(_p) ARM_WM_TNADC(_p, WM_D)
+
+
+/*
+ * TBCST<B,H,W>{Cond} wRd, Rn
+ * Broadcasts a value from the ARM Source reg (Rn) to every SIMD position
+ * in the WMMX Destination reg (wRd).
+ */
+#define DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn) \
+ WM_DEF_CC((_cc)) + ((_q) << 6) + ((_wrd) << 16) + ((_rn) << 12) + 0xE200010
+
+#define _WM_TBCST_CC(_q, _cc, _wrd, _rn) WM_ASM(DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn))
+#define ARM_WM_TBCST_CC(_p, _q, _cc, _wrd, _rn) WM_EMIT(_p, DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn))
+
+/* inline */
+#define _WM_TBCST(_q, _wrd, _rn) _WM_TBCST_CC(_q, WM_CC_AL, _wrd, _rn)
+#define _WM_TBCSTB(_wrd, _rn) _WM_TBCST(WM_B, _wrd, _rn)
+#define _WM_TBCSTH(_wrd, _rn) _WM_TBCST(WM_H, _wrd, _rn)
+#define _WM_TBCSTD(_wrd, _rn) _WM_TBCST(WM_D, _wrd, _rn)
+
+/* codegen */
+#define ARM_WM_TBCST(_p, _q, _wrd, _rn) ARM_WM_TBCST_CC(_p, _q, WM_CC_AL, _wrd, _rn)
+#define ARM_WM_TBCSTB(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_B, _wrd, _rn)
+#define ARM_WM_TBCSTH(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_H, _wrd, _rn)
+#define ARM_WM_TBCSTD(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_D, _wrd, _rn)
+
+
+#endif /* __WMMX_H__ */
diff --git a/src/arch/arm/cmp_macros.th b/src/arch/arm/cmp_macros.th
new file mode 100644
index 0000000..cb2639d
--- /dev/null
+++ b/src/arch/arm/cmp_macros.th
@@ -0,0 +1,56 @@
+/* PSR := <Op> Rn, (imm8 ROR 2*rot) */
+#define ARM_<Op>_REG_IMM_COND(p, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, 0, rn, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, rn, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, 0, rn, imm8, rot, cond)
+#define _<Op>_REG_IMM(rn, imm8, rot) \
+ _<Op>_REG_IMM_COND(rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, rn, imm8, cond) \
+ ARM_<Op>_REG_IMM_COND(p, rn, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, rn, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(rn, imm8, cond) \
+ _<Op>_REG_IMM_COND(rn, imm8, 0, cond)
+#define _<Op>_REG_IMM8(rn, imm8) \
+ _<Op>_REG_IMM8_COND(rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, Rm */
+#define ARM_<Op>_REG_REG_COND(p, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, 0, rn, rm, cond)
+#define ARM_<Op>_REG_REG(p, rn, rm) \
+ ARM_<Op>_REG_REG_COND(p, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, 0, rn, rm, cond)
+#define _<Op>_REG_REG(rn, rm) \
+ _<Op>_REG_REG_COND(rn, rm, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, (Rm <shift_type> imm8) */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, 0, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, 0, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
diff --git a/src/arch/arm/dpi_macros.th b/src/arch/arm/dpi_macros.th
new file mode 100644
index 0000000..be43d1f
--- /dev/null
+++ b/src/arch/arm/dpi_macros.th
@@ -0,0 +1,112 @@
+/* -- <Op> -- */
+
+/* Rd := Rn <Op> (imm8 ROR rot) ; rot is power of 2 */
+#define ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define ARM_<Op>S_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define _<Op>_REG_IMM(rd, rn, imm8, rot) \
+ _<Op>_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _<Op>S_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define _<Op>S_REG_IMM(rd, rn, imm8, rot) \
+ _<Op>S_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, rd, rn, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_<Op>S_REG_IMM8(p, rd, rn, imm8) \
+ ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _<Op>_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _<Op>_REG_IMM8(rd, rn, imm8) \
+ _<Op>_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _<Op>S_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _<Op>S_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _<Op>S_REG_IMM8(rd, rn, imm8) \
+ _<Op>S_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> Rm */
+#define ARM_<Op>_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond)
+#define ARM_<Op>_REG_REG(p, rd, rn, rm) \
+ ARM_<Op>_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_<Op>S_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond)
+#define ARM_<Op>S_REG_REG(p, rd, rn, rm) \
+ ARM_<Op>S_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_<Op>, rd, rn, rm, cond)
+#define _<Op>_REG_REG(rd, rn, rm) \
+ _<Op>_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _<Op>S_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, rd, rn, rm, cond)
+#define _<Op>S_REG_REG(rd, rn, rm) \
+ _<Op>S_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> (Rm <shift_type> imm_shift) */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>S_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _<Op>S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>S_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _<Op>S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> (Rm <shift_type> Rs) */
+#define ARM_<Op>_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+	ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define ARM_<Op>_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+	ARM_<Op>_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+	ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define ARM_<Op>S_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+	ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+	ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define _<Op>_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+	_<Op>_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _<Op>S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+	ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define _<Op>S_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+	_<Op>S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
diff --git a/src/arch/arm/dpiops.sh b/src/arch/arm/dpiops.sh
new file mode 100755
index 0000000..d3b93ff
--- /dev/null
+++ b/src/arch/arm/dpiops.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC"
+CMP_OPCODES="TST TEQ CMP CMN"
+MOV_OPCODES="MOV MVN"
+
+# $1: opcode list
+# $2: template
+gen() {
+	for i in $1; do
+		sed "s/<Op>/$i/g" "$2.th"
+	done
+}
+
+
+
+printf '/* Macros for DPI ops, auto-generated from template */\n\n'
+
+printf '\n/* mov/mvn */\n\n'
+gen "$MOV_OPCODES" mov_macros
+
+printf '\n/* DPIs, arithmetic and logical */\n\n'
+gen "$OPCODES" dpi_macros
+
+printf '\n\n\n'
+
+printf '\n/* DPIs, comparison */\n\n'
+gen "$CMP_OPCODES" cmp_macros
+
+printf '\n/* end generated */\n\n'
diff --git a/src/arch/arm/mov_macros.th b/src/arch/arm/mov_macros.th
new file mode 100644
index 0000000..6bac290
--- /dev/null
+++ b/src/arch/arm/mov_macros.th
@@ -0,0 +1,121 @@
+/* Rd := imm8 ROR rot */
+#define ARM_<Op>_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, reg, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define ARM_<Op>S_REG_IMM(p, reg, imm8, rot) \
+ ARM_<Op>S_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define _<Op>_REG_IMM(reg, imm8, rot) \
+ _<Op>_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define _<Op>S_REG_IMM(reg, imm8, rot) \
+ _<Op>S_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, reg, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define ARM_<Op>S_REG_IMM8(p, reg, imm8) \
+ ARM_<Op>S_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define _<Op>_REG_IMM8(reg, imm8) \
+ _<Op>_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define _<Op>S_REG_IMM8(reg, imm8) \
+ _<Op>S_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm */
+#define ARM_<Op>_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, 0, rm, cond)
+#define ARM_<Op>_REG_REG(p, rd, rm) \
+ ARM_<Op>_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, rd, 0, rm, cond)
+#define ARM_<Op>S_REG_REG(p, rd, rm) \
+ ARM_<Op>S_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_<Op>, rd, 0, rm, cond)
+#define _<Op>_REG_REG(rd, rm) \
+ _<Op>_REG_REG_COND(rd, rm, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, rd, 0, rm, cond)
+#define _<Op>S_REG_REG(rd, rm) \
+ _<Op>S_REG_REG_COND(rd, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm <shift_type> imm_shift */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>S_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define _<Op>S_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _<Op>S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+
+/* Rd := (Rm <shift_type> Rs) */
+#define ARM_<Op>_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define ARM_<Op>_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_<Op>_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define ARM_<Op>S_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define _<Op>_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _<Op>_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define _<Op>S_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _<Op>S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
diff --git a/src/arch/arm/tramp.c b/src/arch/arm/tramp.c
new file mode 100644
index 0000000..f736c7a
--- /dev/null
+++ b/src/arch/arm/tramp.c
@@ -0,0 +1,710 @@
+/*
+ * Create trampolines to invoke arbitrary functions.
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ *
+ * Contributions by Malte Hildingson
+ */
+
+#include "arm-codegen.h"
+#include "arm-dis.h"
+
+#if defined(_WIN32_WCE) || defined (UNDER_CE)
+# include <windows.h>
+#else
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
+#if !defined(PLATFORM_MACOSX)
+#include <errno.h>
+
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+
+
+#if 0
+# define ARM_DUMP_DISASM 1
+#endif
+
+/* prototypes for private functions (to avoid compiler warnings) */
+void flush_icache (void);
+void* alloc_code_buff (int num_instr);
+
+
+
+/*
+ * The resulting function takes the form:
+ * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments);
+ * NOTE: all args passed in ARM registers (A1-A4),
+ * then copied to R4-R7 (see definitions below).
+ */
+
+#define REG_FUNC_ADDR ARMREG_R4
+#define REG_RETVAL ARMREG_R5
+#define REG_THIS ARMREG_R6
+#define REG_ARGP ARMREG_R7
+
+
+#define ARG_SIZE sizeof(stackval)
+
+
+
+
+void flush_icache ()
+{
+#if defined(_WIN32)
+ FlushInstructionCache(GetCurrentProcess(), NULL, 0);
+#else
+# if 0
+ asm ("mov r0, r0");
+ asm ("mov r0, #0");
+ asm ("mcr p15, 0, r0, c7, c7, 0");
+# else
+ /* TODO: use (movnv pc, rx) method */
+# endif
+#endif
+}
+
+
+void* alloc_code_buff (int num_instr)
+{
+ void* code_buff;
+ int code_size = num_instr * sizeof(arminstr_t);
+
+#if defined(_WIN32) || defined(UNDER_CE)
+ int old_prot = 0;
+
+ code_buff = malloc(code_size);
+ VirtualProtect(code_buff, code_size, PAGE_EXECUTE_READWRITE, &old_prot);
+#else
+ int page_size = sysconf(_SC_PAGESIZE);
+ int new_code_size;
+
+ new_code_size = code_size + page_size - 1;
+ code_buff = malloc(new_code_size);
+ code_buff = (void *) (((int) code_buff + page_size - 1) & ~(page_size - 1));
+
+ if (mprotect(code_buff, code_size, PROT_READ|PROT_WRITE|PROT_EXEC) != 0) {
+ g_critical (G_GNUC_PRETTY_FUNCTION
+ ": mprotect error: %s", g_strerror (errno));
+ }
+#endif
+
+ return code_buff;
+}
+
+
+/*
+ * Refer to ARM Procedure Call Standard (APCS) for more info.
+ */
+MonoPIFunc mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ MonoType* param;
+ MonoPIFunc code_buff;
+ arminstr_t* p;
+ guint32 code_size, stack_size;
+ guint32 simple_type;
+ int i, hasthis, aregs, regc, stack_offs;
+ int this_loaded;
+ guchar reg_alloc [ARM_NUM_ARG_REGS];
+
+ /* pessimistic estimation for prologue/epilogue size */
+ code_size = 16 + 16;
+ /* push/pop work regs */
+ code_size += 2;
+ /* call */
+ code_size += 2;
+ /* handle retval */
+ code_size += 2;
+
+ stack_size = 0;
+ hasthis = sig->hasthis ? 1 : 0;
+
+ aregs = ARM_NUM_ARG_REGS - hasthis;
+
+ for (i = 0, regc = aregs; i < sig->param_count; ++i) {
+ param = sig->params [i];
+
+ /* keep track of argument sizes */
+ if (i < ARM_NUM_ARG_REGS) reg_alloc [i] = 0;
+
+ if (param->byref) {
+ if (regc > 0) {
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ code_size += 2;
+ stack_size += sizeof(gpointer);
+ }
+ } else {
+ simple_type = param->type;
+enum_calc_size:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ if (regc > 0) {
+ /* register arg */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ /* stack arg */
+ code_size += 2;
+ stack_size += 4;
+ }
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ /* keep track of argument sizes */
+ if (regc > 1) {
+ /* fits into registers, two LDRs */
+ code_size += 2;
+ reg_alloc [i] = regc;
+ regc -= 2;
+ } else if (regc > 0) {
+ /* first half fits into register, one LDR */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ /* the rest on the stack, LDR/STR */
+ code_size += 2;
+ stack_size += 4;
+ } else {
+ /* stack arg, 4 instrs - 2x(LDR/STR) */
+ code_size += 4;
+ stack_size += 2 * 4;
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (param->data.klass->enumtype) {
+ simple_type = param->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+
+ if (mono_class_value_size(param->data.klass, NULL) != 4) {
+ g_error("can only marshal enums, not generic structures (size: %d)", mono_class_value_size(param->data.klass, NULL));
+ }
+ if (regc > 0) {
+ /* register arg */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ /* stack arg */
+ code_size += 2;
+ stack_size += 4;
+ }
+ break;
+ default :
+ break;
+ }
+ }
+ }
+
+ code_buff = (MonoPIFunc)alloc_code_buff(code_size);
+ p = (arminstr_t*)code_buff;
+
+ /* prologue */
+ p = arm_emit_lean_prologue(p, stack_size,
+ /* save workset (r4-r7) */
+ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));
+
+
+ /* copy args into workset */
+ /* callme - always present */
+ ARM_MOV_REG_REG(p, ARMREG_R4, ARMREG_A1);
+ /* retval */
+ if (sig->ret->byref || string_ctor || (sig->ret->type != MONO_TYPE_VOID)) {
+ ARM_MOV_REG_REG(p, ARMREG_R5, ARMREG_A2);
+ }
+ /* this_obj */
+ if (sig->hasthis) {
+ this_loaded = 0;
+ if (stack_size == 0) {
+ ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_A3);
+ this_loaded = 1;
+ } else {
+ ARM_MOV_REG_REG(p, ARMREG_R6, ARMREG_A3);
+ }
+ }
+ /* args */
+ if (sig->param_count != 0) {
+ ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_A4);
+ }
+
+ stack_offs = stack_size;
+
+ /* handle arguments */
+ /* in reverse order so we could use r0 (arg1) for memory transfers */
+ for (i = sig->param_count; --i >= 0;) {
+ param = sig->params [i];
+		if (param->byref) {
+			if (i < aregs && reg_alloc [i] > 0) {
+				ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+			} else {
+				stack_offs -= sizeof(armword_t);
+				ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+				ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+			}
+ } else {
+ simple_type = param->type;
+enum_marshal:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ if (i < aregs && reg_alloc [i] > 0) {
+ /* pass in register */
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ if (i < aregs && reg_alloc [i] > 0) {
+ if (reg_alloc [i] > 1) {
+ /* pass in registers */
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]) + 1, REG_ARGP, i*ARG_SIZE + 4);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ }
+ } else {
+ /* two words transferred on the stack */
+ stack_offs -= 2*sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs + 4);
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (param->data.klass->enumtype) {
+ /* it's an enum value, proceed based on its base type */
+ simple_type = param->data.klass->enum_basetype->type;
+ goto enum_marshal;
+			} else {
+				if (i < aregs && reg_alloc[i] > 0) {
+					/* base arg reg, skipping [this], at this arg's register slot */
+					int vtreg = ARMREG_A1 + hasthis + (aregs - reg_alloc[i]);
+					ARM_LDR_IMM(p, vtreg, REG_ARGP, i * ARG_SIZE);
+					ARM_LDR_IMM(p, vtreg, vtreg, 0);
+				} else {
+					stack_offs -= sizeof(armword_t);
+					ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i * ARG_SIZE);
+					ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R0, 0);
+					ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+				}
+			}
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (sig->hasthis && !this_loaded) {
+ /* [this] always passed in A1, regardless of sig->call_convention */
+ ARM_MOV_REG_REG(p, ARMREG_A1, REG_THIS);
+ }
+
+ /* call [func] */
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, REG_FUNC_ADDR);
+
+ /* handle retval */
+ if (sig->ret->byref || string_ctor) {
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ } else {
+ simple_type = sig->ret->type;
+enum_retvalue:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ ARM_STRB_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ ARM_STRH_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ /*
+ * A 32-bit integer and integer-equivalent return value
+ * is returned in R0.
+ * Single-precision floating-point values are returned in R0.
+ */
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ /*
+ * A 64-bit integer is returned in R0 and R1.
+ * Double-precision floating-point values are returned in R0 and R1.
+ */
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ ARM_STR_IMM(p, ARMREG_R1, REG_RETVAL, 4);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simple_type = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ break;
+ }
+ }
+
+ p = arm_emit_std_epilogue(p, stack_size,
+ /* restore R4-R7 */
+ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));
+
+ flush_icache();
+
+#ifdef ARM_DUMP_DISASM
+ _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
+#endif
+
+ return code_buff;
+}
+
+
+
+#define MINV_OFFS(member) G_STRUCT_OFFSET(MonoInvocation, member)
+
+
+
+/*
+ * Returns a pointer to a native function that can be used to
+ * call the specified method.
+ * The function created will receive the arguments according
+ * to the call convention specified in the method.
+ * This function works by creating a MonoInvocation structure,
+ * filling the fields in and calling ves_exec_method on it.
+ * Still need to figure out how to handle the exception stuff
+ * across the managed/unmanaged boundary.
+ */
+void* mono_arch_create_method_pointer (MonoMethod* method)
+{
+ MonoMethodSignature* sig;
+ guchar* p, * p_method, * p_stackval_from_data, * p_exec;
+ void* code_buff;
+ int i, stack_size, arg_pos, arg_add, stackval_pos, offs;
+ int areg, reg_args, shift, pos;
+ MonoJitInfo *ji;
+
+ code_buff = alloc_code_buff(128);
+ p = (guchar*)code_buff;
+
+ sig = method->signature;
+
+ ARM_B(p, 3);
+
+ /* embed magic number followed by method pointer */
+ *p++ = 'M';
+ *p++ = 'o';
+ *p++ = 'n';
+ *p++ = 'o';
+ /* method ptr */
+ *(void**)p = method;
+ p_method = p;
+ p += 4;
+
+ /* call table */
+ *(void**)p = stackval_from_data;
+ p_stackval_from_data = p;
+ p += 4;
+ *(void**)p = ves_exec_method;
+ p_exec = p;
+ p += 4;
+
+ stack_size = sizeof(MonoInvocation) + ARG_SIZE*(sig->param_count + 1) + ARM_NUM_ARG_REGS*2*sizeof(armword_t);
+
+ /* prologue */
+ p = (guchar*)arm_emit_lean_prologue((arminstr_t*)p, stack_size,
+ (1 << ARMREG_R4) |
+ (1 << ARMREG_R5) |
+ (1 << ARMREG_R6) |
+ (1 << ARMREG_R7));
+
+ /* R7 - ptr to stack args */
+ ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_IP);
+
+ /*
+ * Initialize MonoInvocation fields, first the ones known now.
+ */
+ ARM_MOV_REG_IMM8(p, ARMREG_R4, 0);
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex_handler));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(parent));
+
+ /* Set the method pointer. */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, -(int)(p - p_method + sizeof(arminstr_t)*2));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(method));
+
+ if (sig->hasthis) {
+ /* [this] in A1 */
+ ARM_STR_IMM(p, ARMREG_A1, ARMREG_SP, MINV_OFFS(obj));
+ } else {
+ /* else set minv.obj to NULL */
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(obj));
+ }
+
+ /* copy args from registers to stack */
+ areg = ARMREG_A1 + sig->hasthis;
+ arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t);
+ arg_add = 0;
+ for (i = 0; i < sig->param_count; ++i) {
+ if (areg >= ARM_NUM_ARG_REGS) break;
+ ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos);
+ ++areg;
+ if (!sig->params[i]->byref) {
+ switch (sig->params[i]->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ if (areg >= ARM_NUM_ARG_REGS) {
+ /* load second half of 64-bit arg */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_R7, 0);
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_R7, arg_pos + sizeof(armword_t));
+ arg_add = sizeof(armword_t);
+ } else {
+ /* second half is already the register */
+ ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos + sizeof(armword_t));
+ ++areg;
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ /* assert */
+ default:
+ break;
+ }
+ }
+ arg_pos += 2 * sizeof(armword_t);
+ }
+ /* number of args passed in registers */
+ reg_args = i;
+
+
+
+ /*
+ * Calc and save stack args ptr,
+ * args follow MonoInvocation struct on the stack.
+ */
+ ARM_ADD_REG_IMM8(p, ARMREG_R1, ARMREG_SP, sizeof(MonoInvocation));
+ ARM_STR_IMM(p, ARMREG_R1, ARMREG_SP, MINV_OFFS(stack_args));
+
+ /* convert method args to stackvals */
+ arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t);
+ stackval_pos = sizeof(MonoInvocation);
+ for (i = 0; i < sig->param_count; ++i) {
+ if (i < reg_args) {
+ ARM_SUB_REG_IMM8(p, ARMREG_A3, ARMREG_R7, -arg_pos);
+ arg_pos += 2 * sizeof(armword_t);
+		} else {
+			if (arg_pos < 0) arg_pos = 0;
+			pos = arg_pos + arg_add;
+			if (pos <= 0xFF) {
+				ARM_ADD_REG_IMM8(p, ARMREG_A3, ARMREG_R7, pos);
+			} else {
+				if (is_arm_const((armword_t)pos)) {
+					shift = calc_arm_mov_const_shift((armword_t)pos);
+					ARM_ADD_REG_IMM(p, ARMREG_A3, ARMREG_R7, pos >> ((32 - shift) & 31), shift >> 1);
+				} else {
+					p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)pos);
+					ARM_ADD_REG_REG(p, ARMREG_A3, ARMREG_R7, ARMREG_R6); /* A3 = data ptr; A2 holds the result ptr */
+				}
+			}
+ arg_pos += sizeof(armword_t);
+ if (!sig->params[i]->byref) {
+ switch (sig->params[i]->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ arg_pos += sizeof(armword_t);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ /* assert */
+ default:
+ break;
+ }
+ }
+ }
+
+ /* A2 = result */
+ if (stackval_pos <= 0xFF) {
+ ARM_ADD_REG_IMM8(p, ARMREG_A2, ARMREG_SP, stackval_pos);
+ } else {
+ if (is_arm_const((armword_t)stackval_pos)) {
+ shift = calc_arm_mov_const_shift((armword_t)stackval_pos);
+ ARM_ADD_REG_IMM(p, ARMREG_A2, ARMREG_SP, stackval_pos >> ((32 - shift) & 31), shift >> 1);
+ } else {
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)stackval_pos);
+ ARM_ADD_REG_REG(p, ARMREG_A2, ARMREG_SP, ARMREG_R6);
+ }
+ }
+
+ /* A1 = type */
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_A1, (armword_t)sig->params [i]);
+
+ stackval_pos += ARG_SIZE;
+
+ offs = -(p + 2*sizeof(arminstr_t) - p_stackval_from_data);
+ /* load function address */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs);
+ /* call stackval_from_data */
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4);
+ }
+
+ /* store retval ptr */
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R5, (armword_t)stackval_pos);
+ ARM_ADD_REG_REG(p, ARMREG_R5, ARMREG_SP, ARMREG_R4);
+ ARM_STR_IMM(p, ARMREG_R5, ARMREG_SP, MINV_OFFS(retval));
+
+ /*
+ * Call the method.
+ */
+ /* A1 = MonoInvocation ptr */
+ ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_SP);
+ offs = -(p + 2*sizeof(arminstr_t) - p_exec);
+ /* load function address */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs);
+ /* call ves_exec */
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4);
+
+
+ /*
+ * Move retval into reg.
+ */
+ if (sig->ret->byref) {
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ } else {
+ switch (sig->ret->type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ ARM_LDRB_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ ARM_LDRH_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ ARM_LDR_IMM(p, ARMREG_R1, ARMREG_R5, 4);
+ break;
+ case MONO_TYPE_VOID:
+ default:
+ break;
+ }
+ }
+
+
+ p = (guchar*)arm_emit_std_epilogue((arminstr_t*)p, stack_size,
+ (1 << ARMREG_R4) |
+ (1 << ARMREG_R5) |
+ (1 << ARMREG_R6) |
+ (1 << ARMREG_R7));
+
+ flush_icache();
+
+#ifdef ARM_DUMP_DISASM
+ _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
+#endif
+
+ ji = g_new0(MonoJitInfo, 1);
+ ji->method = method;
+ ji->code_size = ((guint8 *) p) - ((guint8 *) code_buff);
+ ji->code_start = (gpointer) code_buff;
+
+ mono_jit_info_table_add(mono_get_root_domain (), ji);
+
+ return code_buff;
+}
+
+
+/*
+ * mono_create_method_pointer () will insert a pointer to the MonoMethod
+ * so that the interp can easily get at the data: this function will retrieve
+ * the method from the code stream.
+ */
+MonoMethod* mono_method_pointer_get (void* code)
+{
+ unsigned char* c = code;
+ /* check out magic number that follows unconditional branch */
+ if (c[4] == 'M' &&
+ c[5] == 'o' &&
+ c[6] == 'n' &&
+ c[7] == 'o') return ((MonoMethod**)code)[2];
+ return NULL;
+}
+#endif
diff --git a/src/arch/arm/vfp_macros.th b/src/arch/arm/vfp_macros.th
new file mode 100644
index 0000000..cca67dc
--- /dev/null
+++ b/src/arch/arm/vfp_macros.th
@@ -0,0 +1,15 @@
+/* -- <Op> -- */
+
+/*
+ * Template file: this is not valid C on its own.  vfpops.sh substitutes
+ * every <Op> with a dyadic VFP opcode name (sed "s/<Op>/.../g"), once per
+ * opcode, when generating arm_vfpmacros.h.
+ */
+
+/* Fd := Fn <Op> Fm */
+/* Double-precision variant (emits with ARM_VFP_COPROC_DOUBLE). */
+#define ARM_VFP_<Op>D_COND(p, rd, rn, rm, cond) \
+	ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_<Op>,rd,rn,rm))
+#define ARM_VFP_<Op>D(p, rd, rn, rm) \
+	ARM_VFP_<Op>D_COND(p, rd, rn, rm, ARMCOND_AL)
+
+/* Single-precision variant (emits with ARM_VFP_COPROC_SINGLE). */
+#define ARM_VFP_<Op>S_COND(p, rd, rn, rm, cond) \
+	ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_<Op>,rd,rn,rm))
+#define ARM_VFP_<Op>S(p, rd, rn, rm) \
+	ARM_VFP_<Op>S_COND(p, rd, rn, rm, ARMCOND_AL)
+
+
+
diff --git a/src/arch/arm/vfpm_macros.th b/src/arch/arm/vfpm_macros.th
new file mode 100644
index 0000000..25ad721
--- /dev/null
+++ b/src/arch/arm/vfpm_macros.th
@@ -0,0 +1,14 @@
+/* -- <Op> -- */
+
+/*
+ * Template file: this is not valid C on its own.  vfpops.sh substitutes
+ * every <Op> with a monadic VFP opcode name (sed "s/<Op>/.../g"), once per
+ * opcode, when generating arm_vfpmacros.h.
+ */
+
+/* Fd := <Op> Fm */
+
+/* Double-precision variant (emits with ARM_VFP_COPROC_DOUBLE). */
+#define ARM_<Op>D_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_<Op>,(dreg),(sreg)))
+#define ARM_<Op>D(p,dreg,sreg) ARM_<Op>D_COND(p,dreg,sreg,ARMCOND_AL)
+
+/* Single-precision variant (emits with ARM_VFP_COPROC_SINGLE). */
+#define ARM_<Op>S_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_<Op>,(dreg),(sreg)))
+#define ARM_<Op>S(p,dreg,sreg) ARM_<Op>S_COND(p,dreg,sreg,ARMCOND_AL)
+
+
+
+
diff --git a/src/arch/arm/vfpops.sh b/src/arch/arm/vfpops.sh
new file mode 100755
index 0000000..bed4a9c
--- /dev/null
+++ b/src/arch/arm/vfpops.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+DYADIC="ADD SUB MUL NMUL DIV"
+MONADIC="CPY ABS NEG SQRT CMP CMPE CMPZ CMPEZ CVT UITO SITO TOUI TOSI TOUIZ TOSIZ"
+
+# $1: opcode list
+# $2: template
+gen() {
+ for i in $1; do
+ sed "s/<Op>/$i/g" $2.th
+ done
+}
+
+echo -e "/* Macros for VFP ops, auto-generated from template */\n"
+
+echo -e "\n/* dyadic */\n"
+gen "$DYADIC" vfp_macros
+
+echo -e "\n/* monadic */\n"
+gen "$MONADIC" vfpm_macros
+
+echo -e "\n\n"
+
+echo -e "\n/* end generated */\n"
OpenPOWER on IntegriCloud